xref: /openbmc/qemu/hw/i386/xen/xen-hvm.c (revision 21d87050)
1 /*
2  * Copyright (C) 2010       Citrix Ltd.
3  *
4  * This work is licensed under the terms of the GNU GPL, version 2.  See
5  * the COPYING file in the top-level directory.
6  *
7  * Contributions after 2012-01-13 are licensed under the terms of the
8  * GNU GPL, version 2 or (at your option) any later version.
9  */
10 
11 #include "qemu/osdep.h"
12 #include "qemu/units.h"
13 
14 #include "cpu.h"
15 #include "hw/pci/pci.h"
16 #include "hw/pci/pci_host.h"
17 #include "hw/i386/pc.h"
18 #include "hw/southbridge/piix.h"
19 #include "hw/irq.h"
20 #include "hw/hw.h"
21 #include "hw/i386/apic-msidef.h"
22 #include "hw/xen/xen_common.h"
23 #include "hw/xen/xen-legacy-backend.h"
24 #include "hw/xen/xen-bus.h"
25 #include "hw/xen/xen-x86.h"
26 #include "qapi/error.h"
27 #include "qapi/qapi-commands-migration.h"
28 #include "qemu/error-report.h"
29 #include "qemu/main-loop.h"
30 #include "qemu/range.h"
31 #include "sysemu/runstate.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/xen.h"
34 #include "sysemu/xen-mapcache.h"
35 #include "trace.h"
36 
37 #include <xen/hvm/ioreq.h>
38 #include <xen/hvm/e820.h>
39 
40 //#define DEBUG_XEN_HVM
41 
42 #ifdef DEBUG_XEN_HVM
43 #define DPRINTF(fmt, ...) \
44     do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
45 #else
46 #define DPRINTF(fmt, ...) \
47     do { } while (0)
48 #endif
49 
50 static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
51 static MemoryRegion *framebuffer;
52 static bool xen_in_migration;
53 
54 /* Compatibility with older Xen versions */
55 
56 /* This allows QEMU to build on a system that has Xen 4.5 or earlier
57  * installed.  This is here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
58  * needs to be included before this block, and hw/xen/xen_common.h needs to
59  * be included before xen/hvm/ioreq.h.
60  */
61 #ifndef IOREQ_TYPE_VMWARE_PORT
62 #define IOREQ_TYPE_VMWARE_PORT  3
63 struct vmware_regs {
64     uint32_t esi;
65     uint32_t edi;
66     uint32_t ebx;
67     uint32_t ecx;
68     uint32_t edx;
69 };
70 typedef struct vmware_regs vmware_regs_t;
71 
72 struct shared_vmport_iopage {
73     struct vmware_regs vcpu_vmport_regs[1];
74 };
75 typedef struct shared_vmport_iopage shared_vmport_iopage_t;
76 #endif
77 
78 static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
79 {
80     return shared_page->vcpu_ioreq[i].vp_eport;
81 }
82 static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
83 {
84     return &shared_page->vcpu_ioreq[vcpu];
85 }
86 
87 #define BUFFER_IO_MAX_DELAY  100
88 
89 typedef struct XenPhysmap {
90     hwaddr start_addr;
91     ram_addr_t size;
92     const char *name;
93     hwaddr phys_offset;
94 
95     QLIST_ENTRY(XenPhysmap) list;
96 } XenPhysmap;
97 
98 static QLIST_HEAD(, XenPhysmap) xen_physmap;
99 
100 typedef struct XenPciDevice {
101     PCIDevice *pci_dev;
102     uint32_t sbdf;
103     QLIST_ENTRY(XenPciDevice) entry;
104 } XenPciDevice;
105 
106 typedef struct XenIOState {
107     ioservid_t ioservid;
108     shared_iopage_t *shared_page;
109     shared_vmport_iopage_t *shared_vmport_page;
110     buffered_iopage_t *buffered_io_page;
111     xenforeignmemory_resource_handle *fres;
112     QEMUTimer *buffered_io_timer;
113     CPUState **cpu_by_vcpu_id;
114     /* the evtchn ports for polling notifications, one per vcpu */
115     evtchn_port_t *ioreq_local_port;
116     /* evtchn remote and local ports for buffered io */
117     evtchn_port_t bufioreq_remote_port;
118     evtchn_port_t bufioreq_local_port;
119     /* the evtchn fd for polling */
120     xenevtchn_handle *xce_handle;
121     /* which vcpu we are serving */
122     int send_vcpu;
123 
124     struct xs_handle *xenstore;
125     MemoryListener memory_listener;
126     MemoryListener io_listener;
127     QLIST_HEAD(, XenPciDevice) dev_list;
128     DeviceListener device_listener;
129     hwaddr free_phys_offset;
130     const XenPhysmap *log_for_dirtybit;
131     /* Buffer used by xen_sync_dirty_bitmap */
132     unsigned long *dirty_bitmap;
133 
134     Notifier exit;
135     Notifier suspend;
136     Notifier wakeup;
137 } XenIOState;
138 
139 /* Xen-specific functions for PIIX PCI */
140 
141 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
142 {
143     return irq_num + (PCI_SLOT(pci_dev->devfn) << 2);
144 }
145 
146 void xen_piix3_set_irq(void *opaque, int irq_num, int level)
147 {
148     xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
149                            irq_num & 3, level);
150 }
151 
152 void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
153 {
154     int i;
155 
156     /* Scan for updates to PCI link routes (0x60-0x63). */
157     for (i = 0; i < len; i++) {
158         uint8_t v = (val >> (8 * i)) & 0xff;
159         if (v & 0x80) {
160             v = 0;
161         }
162         v &= 0xf;
163         if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) {
164             xen_set_pci_link_route(address + i - PIIX_PIRQCA, v);
165         }
166     }
167 }
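
/*
 * Illustrative sketch (values assumed, not taken from this file): the guest
 * BIOS typically programs all four link routes with a single dword write,
 * which the loop above decomposes byte by byte:
 *
 *     xen_piix_pci_write_config_client(PIIX_PIRQCA, 0x0b0a0980, 4);
 *     // byte 0: 0x80 has bit 7 set -> link A masked to 0 (not routed)
 *     // bytes 1-3: links B, C and D routed to IRQs 9, 10 and 11
 */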
168 
169 int xen_set_pci_link_route(uint8_t link, uint8_t irq)
170 {
171     return xendevicemodel_set_pci_link_route(xen_dmod, xen_domid, link, irq);
172 }
173 
174 int xen_is_pirq_msi(uint32_t msi_data)
175 {
176     /* If the vector is 0, the MSI is remapped into a PIRQ, which is
177      * passed as dest_id.
178      */
179     return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
180 }
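
/*
 * For example (illustrative values): MSI_DATA_VECTOR_MASK covers data
 * bits 0-7, so a data word of 0x4000 has vector 0 and is treated as a
 * PIRQ-remapped MSI, while 0x4030 (vector 0x30) is a normal MSI:
 *
 *     xen_is_pirq_msi(0x4000);   // 1: remapped, PIRQ number in dest_id
 *     xen_is_pirq_msi(0x4030);   // 0: regular MSI delivery
 */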
181 
182 void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
183 {
184     xen_inject_msi(xen_domid, addr, data);
185 }
186 
187 static void xen_suspend_notifier(Notifier *notifier, void *data)
188 {
189     xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
190 }
191 
192 /* Xen Interrupt Controller */
193 
194 static void xen_set_irq(void *opaque, int irq, int level)
195 {
196     xen_set_isa_irq_level(xen_domid, irq, level);
197 }
198 
199 qemu_irq *xen_interrupt_controller_init(void)
200 {
201     return qemu_allocate_irqs(xen_set_irq, NULL, 16);
202 }
203 
204 /* Memory Ops */
205 
206 static void xen_ram_init(PCMachineState *pcms,
207                          ram_addr_t ram_size, MemoryRegion **ram_memory_p)
208 {
209     X86MachineState *x86ms = X86_MACHINE(pcms);
210     MemoryRegion *sysmem = get_system_memory();
211     ram_addr_t block_len;
212     uint64_t user_lowmem =
213         object_property_get_uint(qdev_get_machine(),
214                                  PC_MACHINE_MAX_RAM_BELOW_4G,
215                                  &error_abort);
216 
217     /* Handle the machine option max-ram-below-4g: clamp lowmem to
218      * min(Xen limit, user limit).
219      */
220     if (!user_lowmem) {
221         user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
222     }
223     if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
224         user_lowmem = HVM_BELOW_4G_RAM_END;
225     }
226 
227     if (ram_size >= user_lowmem) {
228         x86ms->above_4g_mem_size = ram_size - user_lowmem;
229         x86ms->below_4g_mem_size = user_lowmem;
230     } else {
231         x86ms->above_4g_mem_size = 0;
232         x86ms->below_4g_mem_size = ram_size;
233     }
234     if (!x86ms->above_4g_mem_size) {
235         block_len = ram_size;
236     } else {
237         /*
238          * Xen does not allocate the memory contiguously; it keeps a
239          * hole of the size computed above or passed in.
240          */
241         block_len = (4 * GiB) + x86ms->above_4g_mem_size;
242     }
243     memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
244                            &error_fatal);
245     *ram_memory_p = &ram_memory;
246 
247     memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
248                              &ram_memory, 0, 0xa0000);
249     memory_region_add_subregion(sysmem, 0, &ram_640k);
250     /* Skip the VGA I/O memory space; it will be registered later by the
251      * emulated VGA device.
252      *
253      * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
254      * option ROMs, so it is registered here as RAM.
255      */
256     memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
257                              &ram_memory, 0xc0000,
258                              x86ms->below_4g_mem_size - 0xc0000);
259     memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
260     if (x86ms->above_4g_mem_size > 0) {
261         memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
262                                  &ram_memory, 0x100000000ULL,
263                                  x86ms->above_4g_mem_size);
264         memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
265     }
266 }
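
/*
 * Worked example (assuming the usual HVM_BELOW_4G_RAM_END of 0xf0000000
 * and no max-ram-below-4g override): a 6 GiB guest is split into
 * below_4g = 0xf0000000 (3.75 GiB) and above_4g = 0x90000000 (2.25 GiB),
 * so "xen.ram" is created with block_len = 4 GiB + 0x90000000 and the
 * ram_hi alias maps its top 2.25 GiB at guest address 0x100000000.
 */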
267 
268 void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
269                    Error **errp)
270 {
271     unsigned long nr_pfn;
272     xen_pfn_t *pfn_list;
273     int i;
274 
275     if (runstate_check(RUN_STATE_INMIGRATE)) {
276         /* RAM already populated in Xen */
277         fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
278                 " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
279                 __func__, size, ram_addr);
280         return;
281     }
282 
283     if (mr == &ram_memory) {
284         return;
285     }
286 
287     trace_xen_ram_alloc(ram_addr, size);
288 
289     nr_pfn = size >> TARGET_PAGE_BITS;
290     pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);
291 
292     for (i = 0; i < nr_pfn; i++) {
293         pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
294     }
295 
296     if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
297         error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
298                    ram_addr);
299     }
300 
301     g_free(pfn_list);
302 }
303 
304 static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
305 {
306     XenPhysmap *physmap = NULL;
307 
308     start_addr &= TARGET_PAGE_MASK;
309 
310     QLIST_FOREACH(physmap, &xen_physmap, list) {
311         if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
312             return physmap;
313         }
314     }
315     return NULL;
316 }
317 
318 static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size)
319 {
320     hwaddr addr = phys_offset & TARGET_PAGE_MASK;
321     XenPhysmap *physmap = NULL;
322 
323     QLIST_FOREACH(physmap, &xen_physmap, list) {
324         if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
325             return physmap->start_addr + (phys_offset - physmap->phys_offset);
326         }
327     }
328 
329     return phys_offset;
330 }
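
/*
 * For example (illustrative entry): with a physmap entry of
 * { start_addr = 0xf0000000, size = 0x800000, phys_offset = 0xff000000 },
 * xen_phys_offset_to_gaddr(0xff000010, len) returns 0xf0000010, i.e. the
 * guest address the relocated page now lives at; offsets outside every
 * entry are returned unchanged.
 */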
331 
332 #ifdef XEN_COMPAT_PHYSMAP
333 static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
334 {
335     char path[80], value[17];
336 
337     snprintf(path, sizeof(path),
338             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
339             xen_domid, (uint64_t)physmap->phys_offset);
340     snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr);
341     if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
342         return -1;
343     }
344     snprintf(path, sizeof(path),
345             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
346             xen_domid, (uint64_t)physmap->phys_offset);
347     snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size);
348     if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
349         return -1;
350     }
351     if (physmap->name) {
352         snprintf(path, sizeof(path),
353                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
354                 xen_domid, (uint64_t)physmap->phys_offset);
355         if (!xs_write(state->xenstore, 0, path,
356                       physmap->name, strlen(physmap->name))) {
357             return -1;
358         }
359     }
360     return 0;
361 }
362 #else
363 static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
364 {
365     return 0;
366 }
367 #endif
368 
369 static int xen_add_to_physmap(XenIOState *state,
370                               hwaddr start_addr,
371                               ram_addr_t size,
372                               MemoryRegion *mr,
373                               hwaddr offset_within_region)
374 {
375     unsigned long nr_pages;
376     int rc = 0;
377     XenPhysmap *physmap = NULL;
378     hwaddr pfn, start_gpfn;
379     hwaddr phys_offset = memory_region_get_ram_addr(mr);
380     const char *mr_name;
381 
382     if (get_physmapping(start_addr, size)) {
383         return 0;
384     }
385     if (size <= 0) {
386         return -1;
387     }
388 
389     /* Xen can only handle a single dirty log region for now, and we want
390      * the linear framebuffer to be that region.
391      * Avoid tracking any region that is not video RAM, and avoid tracking
392      * the legacy VGA region. */
393     if (mr == framebuffer && start_addr > 0xbffff) {
394         goto go_physmap;
395     }
396     return -1;
397 
398 go_physmap:
399     DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
400             start_addr, start_addr + size);
401 
402     mr_name = memory_region_name(mr);
403 
404     physmap = g_new(XenPhysmap, 1);
405 
406     physmap->start_addr = start_addr;
407     physmap->size = size;
408     physmap->name = mr_name;
409     physmap->phys_offset = phys_offset;
410 
411     QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
412 
413     if (runstate_check(RUN_STATE_INMIGRATE)) {
414         /* Now that we have a physmap entry we can replace the dummy mapping
415          * with a real one backed by foreign guest memory. */
416         uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
417         assert(p && p == memory_region_get_ram_ptr(mr));
418 
419         return 0;
420     }
421 
422     pfn = phys_offset >> TARGET_PAGE_BITS;
423     start_gpfn = start_addr >> TARGET_PAGE_BITS;
424     nr_pages = size >> TARGET_PAGE_BITS;
425     rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
426                                         start_gpfn);
427     if (rc) {
428         int saved_errno = errno;
429 
430         error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
431                      " to GFN %"HWADDR_PRIx" failed: %s",
432                      nr_pages, pfn, start_gpfn, strerror(saved_errno));
433         errno = saved_errno;
434         return -1;
435     }
436 
437     rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
438                                    start_addr >> TARGET_PAGE_BITS,
439                                    (start_addr + size - 1) >> TARGET_PAGE_BITS,
440                                    XEN_DOMCTL_MEM_CACHEATTR_WB);
441     if (rc) {
442         error_report("pin_memory_cacheattr failed: %s", strerror(errno));
443     }
444     return xen_save_physmap(state, physmap);
445 }
446 
447 static int xen_remove_from_physmap(XenIOState *state,
448                                    hwaddr start_addr,
449                                    ram_addr_t size)
450 {
451     int rc = 0;
452     XenPhysmap *physmap = NULL;
453     hwaddr phys_offset = 0;
454 
455     physmap = get_physmapping(start_addr, size);
456     if (physmap == NULL) {
457         return -1;
458     }
459 
460     phys_offset = physmap->phys_offset;
461     size = physmap->size;
462 
463     DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
464             "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);
465 
466     size >>= TARGET_PAGE_BITS;
467     start_addr >>= TARGET_PAGE_BITS;
468     phys_offset >>= TARGET_PAGE_BITS;
469     rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
470                                         phys_offset);
471     if (rc) {
472         int saved_errno = errno;
473 
474         error_report("relocate_memory "RAM_ADDR_FMT" pages"
475                      " from GFN %"HWADDR_PRIx
476                      " to GFN %"HWADDR_PRIx" failed: %s",
477                      size, start_addr, phys_offset, strerror(saved_errno));
478         errno = saved_errno;
479         return -1;
480     }
481 
482     QLIST_REMOVE(physmap, list);
483     if (state->log_for_dirtybit == physmap) {
484         state->log_for_dirtybit = NULL;
485         g_free(state->dirty_bitmap);
486         state->dirty_bitmap = NULL;
487     }
488     g_free(physmap);
489 
490     return 0;
491 }
492 
493 static void xen_set_memory(struct MemoryListener *listener,
494                            MemoryRegionSection *section,
495                            bool add)
496 {
497     XenIOState *state = container_of(listener, XenIOState, memory_listener);
498     hwaddr start_addr = section->offset_within_address_space;
499     ram_addr_t size = int128_get64(section->size);
500     bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
501     hvmmem_type_t mem_type;
502 
503     if (section->mr == &ram_memory) {
504         return;
505     } else {
506         if (add) {
507             xen_map_memory_section(xen_domid, state->ioservid,
508                                    section);
509         } else {
510             xen_unmap_memory_section(xen_domid, state->ioservid,
511                                      section);
512         }
513     }
514 
515     if (!memory_region_is_ram(section->mr)) {
516         return;
517     }
518 
519     if (log_dirty != add) {
520         return;
521     }
522 
523     trace_xen_client_set_memory(start_addr, size, log_dirty);
524 
525     start_addr &= TARGET_PAGE_MASK;
526     size = TARGET_PAGE_ALIGN(size);
527 
528     if (add) {
529         if (!memory_region_is_rom(section->mr)) {
530             xen_add_to_physmap(state, start_addr, size,
531                                section->mr, section->offset_within_region);
532         } else {
533             mem_type = HVMMEM_ram_ro;
534             if (xen_set_mem_type(xen_domid, mem_type,
535                                  start_addr >> TARGET_PAGE_BITS,
536                                  size >> TARGET_PAGE_BITS)) {
537                 DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
538                         start_addr);
539             }
540         }
541     } else {
542         if (xen_remove_from_physmap(state, start_addr, size) < 0) {
543             DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
544         }
545     }
546 }
547 
548 static void xen_region_add(MemoryListener *listener,
549                            MemoryRegionSection *section)
550 {
551     memory_region_ref(section->mr);
552     xen_set_memory(listener, section, true);
553 }
554 
555 static void xen_region_del(MemoryListener *listener,
556                            MemoryRegionSection *section)
557 {
558     xen_set_memory(listener, section, false);
559     memory_region_unref(section->mr);
560 }
561 
562 static void xen_io_add(MemoryListener *listener,
563                        MemoryRegionSection *section)
564 {
565     XenIOState *state = container_of(listener, XenIOState, io_listener);
566     MemoryRegion *mr = section->mr;
567 
568     if (mr->ops == &unassigned_io_ops) {
569         return;
570     }
571 
572     memory_region_ref(mr);
573 
574     xen_map_io_section(xen_domid, state->ioservid, section);
575 }
576 
577 static void xen_io_del(MemoryListener *listener,
578                        MemoryRegionSection *section)
579 {
580     XenIOState *state = container_of(listener, XenIOState, io_listener);
581     MemoryRegion *mr = section->mr;
582 
583     if (mr->ops == &unassigned_io_ops) {
584         return;
585     }
586 
587     xen_unmap_io_section(xen_domid, state->ioservid, section);
588 
589     memory_region_unref(mr);
590 }
591 
592 static void xen_device_realize(DeviceListener *listener,
593                                DeviceState *dev)
594 {
595     XenIOState *state = container_of(listener, XenIOState, device_listener);
596 
597     if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
598         PCIDevice *pci_dev = PCI_DEVICE(dev);
599         XenPciDevice *xendev = g_new(XenPciDevice, 1);
600 
601         xendev->pci_dev = pci_dev;
602         xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
603                                      pci_dev->devfn);
604         QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);
605 
606         xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
607     }
608 }
609 
610 static void xen_device_unrealize(DeviceListener *listener,
611                                  DeviceState *dev)
612 {
613     XenIOState *state = container_of(listener, XenIOState, device_listener);
614 
615     if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
616         PCIDevice *pci_dev = PCI_DEVICE(dev);
617         XenPciDevice *xendev, *next;
618 
619         xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);
620 
621         QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
622             if (xendev->pci_dev == pci_dev) {
623                 QLIST_REMOVE(xendev, entry);
624                 g_free(xendev);
625                 break;
626             }
627         }
628     }
629 }
630 
631 static void xen_sync_dirty_bitmap(XenIOState *state,
632                                   hwaddr start_addr,
633                                   ram_addr_t size)
634 {
635     hwaddr npages = size >> TARGET_PAGE_BITS;
636     const int width = sizeof(unsigned long) * 8;
637     size_t bitmap_size = DIV_ROUND_UP(npages, width);
638     int rc, i, j;
639     const XenPhysmap *physmap = NULL;
640 
641     physmap = get_physmapping(start_addr, size);
642     if (physmap == NULL) {
643         /* not handled */
644         return;
645     }
646 
647     if (state->log_for_dirtybit == NULL) {
648         state->log_for_dirtybit = physmap;
649         state->dirty_bitmap = g_new(unsigned long, bitmap_size);
650     } else if (state->log_for_dirtybit != physmap) {
651         /* Only one range for dirty bitmap can be tracked. */
652         return;
653     }
654 
655     rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
656                               npages, state->dirty_bitmap);
657     if (rc < 0) {
658 #ifndef ENODATA
659 #define ENODATA  ENOENT
660 #endif
661         if (errno == ENODATA) {
662             memory_region_set_dirty(framebuffer, 0, size);
663             DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
664                     ", 0x" TARGET_FMT_plx "): %s\n",
665                     start_addr, start_addr + size, strerror(errno));
666         }
667         return;
668     }
669 
670     for (i = 0; i < bitmap_size; i++) {
671         unsigned long map = state->dirty_bitmap[i];
672         while (map != 0) {
673             j = ctzl(map);
674             map &= ~(1ul << j);
675             memory_region_set_dirty(framebuffer,
676                                     (i * width + j) * TARGET_PAGE_SIZE,
677                                     TARGET_PAGE_SIZE);
678         }
679     }
680 }
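
/*
 * Bit j of bitmap word i marks guest page (i * width + j) within the
 * tracked range.  For example, on an LP64 host (width == 64) a set bit 3
 * in word 2 dirties TARGET_PAGE_SIZE bytes at framebuffer offset
 * (2 * 64 + 3) * TARGET_PAGE_SIZE, i.e. 0x83000 with 4 KiB pages.
 */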
681 
682 static void xen_log_start(MemoryListener *listener,
683                           MemoryRegionSection *section,
684                           int old, int new)
685 {
686     XenIOState *state = container_of(listener, XenIOState, memory_listener);
687 
688     if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
689         xen_sync_dirty_bitmap(state, section->offset_within_address_space,
690                               int128_get64(section->size));
691     }
692 }
693 
694 static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
695                          int old, int new)
696 {
697     XenIOState *state = container_of(listener, XenIOState, memory_listener);
698 
699     if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
700         state->log_for_dirtybit = NULL;
701         g_free(state->dirty_bitmap);
702         state->dirty_bitmap = NULL;
703         /* Disable dirty bit tracking */
704         xen_track_dirty_vram(xen_domid, 0, 0, NULL);
705     }
706 }
707 
708 static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
709 {
710     XenIOState *state = container_of(listener, XenIOState, memory_listener);
711 
712     xen_sync_dirty_bitmap(state, section->offset_within_address_space,
713                           int128_get64(section->size));
714 }
715 
716 static void xen_log_global_start(MemoryListener *listener)
717 {
718     if (xen_enabled()) {
719         xen_in_migration = true;
720     }
721 }
722 
723 static void xen_log_global_stop(MemoryListener *listener)
724 {
725     xen_in_migration = false;
726 }
727 
728 static MemoryListener xen_memory_listener = {
729     .name = "xen-memory",
730     .region_add = xen_region_add,
731     .region_del = xen_region_del,
732     .log_start = xen_log_start,
733     .log_stop = xen_log_stop,
734     .log_sync = xen_log_sync,
735     .log_global_start = xen_log_global_start,
736     .log_global_stop = xen_log_global_stop,
737     .priority = 10,
738 };
739 
740 static MemoryListener xen_io_listener = {
741     .name = "xen-io",
742     .region_add = xen_io_add,
743     .region_del = xen_io_del,
744     .priority = 10,
745 };
746 
747 static DeviceListener xen_device_listener = {
748     .realize = xen_device_realize,
749     .unrealize = xen_device_unrealize,
750 };
751 
752 /* Fetch the pending ioreq packet from shared memory. */
753 static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
754 {
755     ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);
756 
757     if (req->state != STATE_IOREQ_READY) {
758         DPRINTF("I/O request not ready: "
759                 "%x, ptr: %x, port: %"PRIx64", "
760                 "data: %"PRIx64", count: %u, size: %u\n",
761                 req->state, req->data_is_ptr, req->addr,
762                 req->data, req->count, req->size);
763         return NULL;
764     }
765 
766     xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
767 
768     req->state = STATE_IOREQ_INPROCESS;
769     return req;
770 }
771 
772 /* Use poll to get the port notification, then fetch the corresponding
773  * ioreq from shared memory.  Returns the pending ioreq, or NULL if
774  * nothing is ready (or only buffered I/O is pending). */
775 static ioreq_t *cpu_get_ioreq(XenIOState *state)
776 {
777     MachineState *ms = MACHINE(qdev_get_machine());
778     unsigned int max_cpus = ms->smp.max_cpus;
779     int i;
780     evtchn_port_t port;
781 
782     port = xenevtchn_pending(state->xce_handle);
783     if (port == state->bufioreq_local_port) {
784         timer_mod(state->buffered_io_timer,
785                 BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
786         return NULL;
787     }
788 
789     if (port != -1) {
790         for (i = 0; i < max_cpus; i++) {
791             if (state->ioreq_local_port[i] == port) {
792                 break;
793             }
794         }
795 
796         if (i == max_cpus) {
797             hw_error("Fatal error while trying to get io event!\n");
798         }
799 
800         /* unmask the wanted port again */
801         xenevtchn_unmask(state->xce_handle, port);
802 
803         /* get the io packet from shared memory */
804         state->send_vcpu = i;
805         return cpu_get_ioreq_from_shared_memory(state, i);
806     }
807 
808     /* read error or read nothing */
809     return NULL;
810 }
811 
812 static uint32_t do_inp(uint32_t addr, unsigned long size)
813 {
814     switch (size) {
815         case 1:
816             return cpu_inb(addr);
817         case 2:
818             return cpu_inw(addr);
819         case 4:
820             return cpu_inl(addr);
821         default:
822             hw_error("inp: bad size: %04x %lx", addr, size);
823     }
824 }
825 
826 static void do_outp(uint32_t addr,
827         unsigned long size, uint32_t val)
828 {
829     switch (size) {
830         case 1:
831             return cpu_outb(addr, val);
832         case 2:
833             return cpu_outw(addr, val);
834         case 4:
835             return cpu_outl(addr, val);
836         default:
837             hw_error("outp: bad size: %04x %lx", addr, size);
838     }
839 }
840 
841 /*
842  * Helper functions which read/write an object from/to physical guest
843  * memory, as part of the implementation of an ioreq.
844  *
845  * Equivalent to
846  *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
847  *                          val, req->size, 0/1)
848  * except without the integer overflow problems.
849  */
850 static void rw_phys_req_item(hwaddr addr,
851                              ioreq_t *req, uint32_t i, void *val, int rw)
852 {
853     /* Do everything unsigned so that overflow just results in a truncated
854      * result and accesses to undesired parts of guest memory, which is up
855      * to the guest. */
856     hwaddr offset = (hwaddr)req->size * i;
857     if (req->df) {
858         addr -= offset;
859     } else {
860         addr += offset;
861     }
862     cpu_physical_memory_rw(addr, val, req->size, rw);
863 }
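
/*
 * For example: a rep ioreq with req->size == 4 and req->df clear accesses
 * item i == 3 at addr + 12, while with the direction flag set (df, as
 * after an x86 `std`) the same item is accessed at addr - 12.
 */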
864 
865 static inline void read_phys_req_item(hwaddr addr,
866                                       ioreq_t *req, uint32_t i, void *val)
867 {
868     rw_phys_req_item(addr, req, i, val, 0);
869 }
870 static inline void write_phys_req_item(hwaddr addr,
871                                        ioreq_t *req, uint32_t i, void *val)
872 {
873     rw_phys_req_item(addr, req, i, val, 1);
874 }
875 
876 
877 static void cpu_ioreq_pio(ioreq_t *req)
878 {
879     uint32_t i;
880 
881     trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
882                          req->data, req->count, req->size);
883 
884     if (req->size > sizeof(uint32_t)) {
885         hw_error("PIO: bad size (%u)", req->size);
886     }
887 
888     if (req->dir == IOREQ_READ) {
889         if (!req->data_is_ptr) {
890             req->data = do_inp(req->addr, req->size);
891             trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
892                                          req->size);
893         } else {
894             uint32_t tmp;
895 
896             for (i = 0; i < req->count; i++) {
897                 tmp = do_inp(req->addr, req->size);
898                 write_phys_req_item(req->data, req, i, &tmp);
899             }
900         }
901     } else if (req->dir == IOREQ_WRITE) {
902         if (!req->data_is_ptr) {
903             trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
904                                           req->size);
905             do_outp(req->addr, req->size, req->data);
906         } else {
907             for (i = 0; i < req->count; i++) {
908                 uint32_t tmp = 0;
909 
910                 read_phys_req_item(req->data, req, i, &tmp);
911                 do_outp(req->addr, req->size, tmp);
912             }
913         }
914     }
915 }
916 
917 static void cpu_ioreq_move(ioreq_t *req)
918 {
919     uint32_t i;
920 
921     trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
922                          req->data, req->count, req->size);
923 
924     if (req->size > sizeof(req->data)) {
925         hw_error("MMIO: bad size (%u)", req->size);
926     }
927 
928     if (!req->data_is_ptr) {
929         if (req->dir == IOREQ_READ) {
930             for (i = 0; i < req->count; i++) {
931                 read_phys_req_item(req->addr, req, i, &req->data);
932             }
933         } else if (req->dir == IOREQ_WRITE) {
934             for (i = 0; i < req->count; i++) {
935                 write_phys_req_item(req->addr, req, i, &req->data);
936             }
937         }
938     } else {
939         uint64_t tmp;
940 
941         if (req->dir == IOREQ_READ) {
942             for (i = 0; i < req->count; i++) {
943                 read_phys_req_item(req->addr, req, i, &tmp);
944                 write_phys_req_item(req->data, req, i, &tmp);
945             }
946         } else if (req->dir == IOREQ_WRITE) {
947             for (i = 0; i < req->count; i++) {
948                 read_phys_req_item(req->data, req, i, &tmp);
949                 write_phys_req_item(req->addr, req, i, &tmp);
950             }
951         }
952     }
953 }
954 
955 static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
956 {
957     uint32_t sbdf = req->addr >> 32;
958     uint32_t reg = req->addr;
959     XenPciDevice *xendev;
960 
961     if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
962         req->size != sizeof(uint32_t)) {
963         hw_error("PCI config access: bad size (%u)", req->size);
964     }
965 
966     if (req->count != 1) {
967         hw_error("PCI config access: bad count (%u)", req->count);
968     }
969 
970     QLIST_FOREACH(xendev, &state->dev_list, entry) {
971         if (xendev->sbdf != sbdf) {
972             continue;
973         }
974 
975         if (!req->data_is_ptr) {
976             if (req->dir == IOREQ_READ) {
977                 req->data = pci_host_config_read_common(
978                     xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
979                     req->size);
980                 trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
981                                             req->size, req->data);
982             } else if (req->dir == IOREQ_WRITE) {
983                 trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
984                                              req->size, req->data);
985                 pci_host_config_write_common(
986                     xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
987                     req->data, req->size);
988             }
989         } else {
990             uint32_t tmp;
991 
992             if (req->dir == IOREQ_READ) {
993                 tmp = pci_host_config_read_common(
994                     xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
995                     req->size);
996                 trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
997                                             req->size, tmp);
998                 write_phys_req_item(req->data, req, 0, &tmp);
999             } else if (req->dir == IOREQ_WRITE) {
1000                 read_phys_req_item(req->data, req, 0, &tmp);
1001                 trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
1002                                              req->size, tmp);
1003                 pci_host_config_write_common(
1004                     xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
1005                     tmp, req->size);
1006             }
1007         }
1008     }
1009 }
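
/*
 * Illustrative encoding (matching the decoding above): for a config read
 * of BAR0 (offset 0x10) on device 00:02.0, sbdf is
 * PCI_BUILD_BDF(0, PCI_DEVFN(2, 0)) == 0x10, so the request arrives with
 * req->addr == 0x0000001000000010: SBDF in the upper 32 bits, register
 * offset in the lower bits.
 */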
1010 
1011 static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
1012 {
1013     X86CPU *cpu;
1014     CPUX86State *env;
1015 
1016     cpu = X86_CPU(current_cpu);
1017     env = &cpu->env;
1018     env->regs[R_EAX] = req->data;
1019     env->regs[R_EBX] = vmport_regs->ebx;
1020     env->regs[R_ECX] = vmport_regs->ecx;
1021     env->regs[R_EDX] = vmport_regs->edx;
1022     env->regs[R_ESI] = vmport_regs->esi;
1023     env->regs[R_EDI] = vmport_regs->edi;
1024 }
1025 
1026 static void regs_from_cpu(vmware_regs_t *vmport_regs)
1027 {
1028     X86CPU *cpu = X86_CPU(current_cpu);
1029     CPUX86State *env = &cpu->env;
1030 
1031     vmport_regs->ebx = env->regs[R_EBX];
1032     vmport_regs->ecx = env->regs[R_ECX];
1033     vmport_regs->edx = env->regs[R_EDX];
1034     vmport_regs->esi = env->regs[R_ESI];
1035     vmport_regs->edi = env->regs[R_EDI];
1036 }
1037 
1038 static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
1039 {
1040     vmware_regs_t *vmport_regs;
1041 
1042     assert(state->shared_vmport_page);
1043     vmport_regs =
1044         &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
1045     QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));
1046 
1047     current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
1048     regs_to_cpu(vmport_regs, req);
1049     cpu_ioreq_pio(req);
1050     regs_from_cpu(vmport_regs);
1051     current_cpu = NULL;
1052 }
1053 
1054 static void handle_ioreq(XenIOState *state, ioreq_t *req)
1055 {
1056     trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
1057                        req->addr, req->data, req->count, req->size);
1058 
1059     if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
1060             (req->size < sizeof (target_ulong))) {
1061         req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
1062     }
1063 
1064     if (req->dir == IOREQ_WRITE)
1065         trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
1066                                  req->addr, req->data, req->count, req->size);
1067 
1068     switch (req->type) {
1069         case IOREQ_TYPE_PIO:
1070             cpu_ioreq_pio(req);
1071             break;
1072         case IOREQ_TYPE_COPY:
1073             cpu_ioreq_move(req);
1074             break;
1075         case IOREQ_TYPE_VMWARE_PORT:
1076             handle_vmport_ioreq(state, req);
1077             break;
1078         case IOREQ_TYPE_TIMEOFFSET:
1079             break;
1080         case IOREQ_TYPE_INVALIDATE:
1081             xen_invalidate_map_cache();
1082             break;
1083         case IOREQ_TYPE_PCI_CONFIG:
1084             cpu_ioreq_config(state, req);
1085             break;
1086         default:
1087             hw_error("Invalid ioreq type 0x%x\n", req->type);
1088     }
1089     if (req->dir == IOREQ_READ) {
1090         trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
1091                                 req->addr, req->data, req->count, req->size);
1092     }
1093 }
1094 
1095 static bool handle_buffered_iopage(XenIOState *state)
1096 {
1097     buffered_iopage_t *buf_page = state->buffered_io_page;
1098     buf_ioreq_t *buf_req = NULL;
1099     bool handled_ioreq = false;
1100     ioreq_t req;
1101     int qw;
1102 
1103     if (!buf_page) {
1104         return false;
1105     }
1106 
1107     memset(&req, 0x00, sizeof(req));
1108     req.state = STATE_IOREQ_READY;
1109     req.count = 1;
1110     req.dir = IOREQ_WRITE;
1111 
1112     for (;;) {
1113         uint32_t rdptr = buf_page->read_pointer, wrptr;
1114 
1115         xen_rmb();
1116         wrptr = buf_page->write_pointer;
1117         xen_rmb();
1118         if (rdptr != buf_page->read_pointer) {
1119             continue;
1120         }
1121         if (rdptr == wrptr) {
1122             break;
1123         }
1124         buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
1125         req.size = 1U << buf_req->size;
1126         req.addr = buf_req->addr;
1127         req.data = buf_req->data;
1128         req.type = buf_req->type;
1129         xen_rmb();
1130         qw = (req.size == 8);
1131         if (qw) {
1132             if (rdptr + 1 == wrptr) {
1133                 hw_error("Incomplete quad word buffered ioreq");
1134             }
1135             buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
1136                                            IOREQ_BUFFER_SLOT_NUM];
1137             req.data |= ((uint64_t)buf_req->data) << 32;
1138             xen_rmb();
1139         }
1140 
1141         handle_ioreq(state, &req);
1142 
1143         /* Only req.data may get updated by handle_ioreq(), albeit even that
1144          * should not happen as such data would never make it to the guest (we
1145          * can only usefully see writes here after all).
1146          */
1147         assert(req.state == STATE_IOREQ_READY);
1148         assert(req.count == 1);
1149         assert(req.dir == IOREQ_WRITE);
1150         assert(!req.data_is_ptr);
1151 
1152         qatomic_add(&buf_page->read_pointer, qw + 1);
1153         handled_ioreq = true;
1154     }
1155 
1156     return handled_ioreq;
1157 }
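
/*
 * A sketch of the ring handling above (slot values illustrative): an
 * 8-byte buffered write is split across two consecutive slots; the first
 * carries type/addr, a size field of 3 (so req.size = 1 << 3) and the
 * low 32 data bits, the second supplies the high 32 bits, and
 * read_pointer then advances by qw + 1 == 2.
 */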
1158 
1159 static void handle_buffered_io(void *opaque)
1160 {
1161     XenIOState *state = opaque;
1162 
1163     if (handle_buffered_iopage(state)) {
1164         timer_mod(state->buffered_io_timer,
1165                 BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
1166     } else {
1167         timer_del(state->buffered_io_timer);
1168         xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
1169     }
1170 }
1171 
1172 static void cpu_handle_ioreq(void *opaque)
1173 {
1174     XenIOState *state = opaque;
1175     ioreq_t *req = cpu_get_ioreq(state);
1176 
1177     handle_buffered_iopage(state);
1178     if (req) {
1179         ioreq_t copy = *req;
1180 
1181         xen_rmb();
1182         handle_ioreq(state, &copy);
1183         req->data = copy.data;
1184 
1185         if (req->state != STATE_IOREQ_INPROCESS) {
1186             fprintf(stderr, "Badness in I/O request ... not in service?!: "
1187                     "%x, ptr: %x, port: %"PRIx64", "
1188                     "data: %"PRIx64", count: %u, size: %u, type: %u\n",
1189                     req->state, req->data_is_ptr, req->addr,
1190                     req->data, req->count, req->size, req->type);
1191             destroy_hvm_domain(false);
1192             return;
1193         }
1194 
1195         xen_wmb(); /* Update ioreq contents /then/ update state. */
1196 
1197         /*
1198          * We do this before we send the response so that the tools
1199          * have the opportunity to pick up on the reset before the
1200          * guest resumes and does a hlt with interrupts disabled, which
1201          * causes Xen to power down the domain.
1202          */
1203         if (runstate_is_running()) {
1204             ShutdownCause request;
1205 
1206             if (qemu_shutdown_requested_get()) {
1207                 destroy_hvm_domain(false);
1208             }
1209             request = qemu_reset_requested_get();
1210             if (request) {
1211                 qemu_system_reset(request);
1212                 destroy_hvm_domain(true);
1213             }
1214         }
1215 
1216         req->state = STATE_IORESP_READY;
1217         xenevtchn_notify(state->xce_handle,
1218                          state->ioreq_local_port[state->send_vcpu]);
1219     }
1220 }
1221 
1222 static void xen_main_loop_prepare(XenIOState *state)
1223 {
1224     int evtchn_fd = -1;
1225 
1226     if (state->xce_handle != NULL) {
1227         evtchn_fd = xenevtchn_fd(state->xce_handle);
1228     }
1229 
1230     state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
1231                                                  state);
1232 
1233     if (evtchn_fd != -1) {
1234         CPUState *cpu_state;
1235 
1236         DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
1237         CPU_FOREACH(cpu_state) {
1238             DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
1239                     __func__, cpu_state->cpu_index, cpu_state);
1240             state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
1241         }
1242         qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
1243     }
1244 }
1245 
1246 
1247 static void xen_hvm_change_state_handler(void *opaque, bool running,
1248                                          RunState rstate)
1249 {
1250     XenIOState *state = opaque;
1251 
1252     if (running) {
1253         xen_main_loop_prepare(state);
1254     }
1255 
1256     xen_set_ioreq_server_state(xen_domid,
1257                                state->ioservid,
1258                                (rstate == RUN_STATE_RUNNING));
1259 }
1260 
1261 static void xen_exit_notifier(Notifier *n, void *data)
1262 {
1263     XenIOState *state = container_of(n, XenIOState, exit);
1264 
1265     xen_destroy_ioreq_server(xen_domid, state->ioservid);
1266     if (state->fres != NULL) {
1267         xenforeignmemory_unmap_resource(xen_fmem, state->fres);
1268     }
1269 
1270     xenevtchn_close(state->xce_handle);
1271     xs_daemon_close(state->xenstore);
1272 }
1273 
1274 #ifdef XEN_COMPAT_PHYSMAP
1275 static void xen_read_physmap(XenIOState *state)
1276 {
1277     XenPhysmap *physmap = NULL;
1278     unsigned int len, num, i;
1279     char path[80], *value = NULL;
1280     char **entries = NULL;
1281 
1282     snprintf(path, sizeof(path),
1283             "/local/domain/0/device-model/%d/physmap", xen_domid);
1284     entries = xs_directory(state->xenstore, 0, path, &num);
1285     if (entries == NULL)
1286         return;
1287 
1288     for (i = 0; i < num; i++) {
1289         physmap = g_new(XenPhysmap, 1);
1290         physmap->phys_offset = strtoull(entries[i], NULL, 16);
1291         snprintf(path, sizeof(path),
1292                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
1293                 xen_domid, entries[i]);
1294         value = xs_read(state->xenstore, 0, path, &len);
1295         if (value == NULL) {
1296             g_free(physmap);
1297             continue;
1298         }
1299         physmap->start_addr = strtoull(value, NULL, 16);
1300         free(value);
1301 
1302         snprintf(path, sizeof(path),
1303                 "/local/domain/0/device-model/%d/physmap/%s/size",
1304                 xen_domid, entries[i]);
1305         value = xs_read(state->xenstore, 0, path, &len);
1306         if (value == NULL) {
1307             g_free(physmap);
1308             continue;
1309         }
1310         physmap->size = strtoull(value, NULL, 16);
1311         free(value);
1312 
1313         snprintf(path, sizeof(path),
1314                 "/local/domain/0/device-model/%d/physmap/%s/name",
1315                 xen_domid, entries[i]);
1316         physmap->name = xs_read(state->xenstore, 0, path, &len);
1317 
1318         QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
1319     }
1320     free(entries);
1321 }
1322 #else
1323 static void xen_read_physmap(XenIOState *state)
1324 {
1325 }
1326 #endif
1327 
1328 static void xen_wakeup_notifier(Notifier *notifier, void *data)
1329 {
1330     xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
1331 }
1332 
1333 static int xen_map_ioreq_server(XenIOState *state)
1334 {
1335     void *addr = NULL;
1336     xen_pfn_t ioreq_pfn;
1337     xen_pfn_t bufioreq_pfn;
1338     evtchn_port_t bufioreq_evtchn;
1339     int rc;
1340 
1341     /*
1342      * Attempt to map using the resource API and fall back to normal
1343      * foreign mapping if this is not supported.
1344      */
1345     QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
1346     QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
1347     state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
1348                                          XENMEM_resource_ioreq_server,
1349                                          state->ioservid, 0, 2,
1350                                          &addr,
1351                                          PROT_READ | PROT_WRITE, 0);
1352     if (state->fres != NULL) {
1353         trace_xen_map_resource_ioreq(state->ioservid, addr);
1354         state->buffered_io_page = addr;
1355         state->shared_page = addr + TARGET_PAGE_SIZE;
1356     } else if (errno != EOPNOTSUPP) {
1357         error_report("failed to map ioreq server resources: error %d handle=%p",
1358                      errno, xen_xc);
1359         return -1;
1360     }
1361 
1362     rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
1363                                    (state->shared_page == NULL) ?
1364                                    &ioreq_pfn : NULL,
1365                                    (state->buffered_io_page == NULL) ?
1366                                    &bufioreq_pfn : NULL,
1367                                    &bufioreq_evtchn);
1368     if (rc < 0) {
1369         error_report("failed to get ioreq server info: error %d handle=%p",
1370                      errno, xen_xc);
1371         return rc;
1372     }
1373 
1374     if (state->shared_page == NULL) {
1375         DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
1376 
1377         state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
1378                                                   PROT_READ | PROT_WRITE,
1379                                                   1, &ioreq_pfn, NULL);
1380         if (state->shared_page == NULL) {
1381             error_report("map shared IO page returned error %d handle=%p",
1382                          errno, xen_xc);
1383         }
1384     }
1385 
1386     if (state->buffered_io_page == NULL) {
1387         DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
1388 
1389         state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
1390                                                        PROT_READ | PROT_WRITE,
1391                                                        1, &bufioreq_pfn,
1392                                                        NULL);
1393         if (state->buffered_io_page == NULL) {
1394             error_report("map buffered IO page returned error %d", errno);
1395             return -1;
1396         }
1397     }
1398 
1399     if (state->shared_page == NULL || state->buffered_io_page == NULL) {
1400         return -1;
1401     }
1402 
1403     DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
1404 
1405     state->bufioreq_remote_port = bufioreq_evtchn;
1406 
1407     return 0;
1408 }
1409 
1410 void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
1411 {
1412     MachineState *ms = MACHINE(pcms);
1413     unsigned int max_cpus = ms->smp.max_cpus;
1414     int i, rc;
1415     xen_pfn_t ioreq_pfn;
1416     XenIOState *state;
1417 
1418     state = g_new0(XenIOState, 1);
1419 
1420     state->xce_handle = xenevtchn_open(NULL, 0);
1421     if (state->xce_handle == NULL) {
1422         perror("xen: event channel open");
1423         goto err;
1424     }
1425 
1426     state->xenstore = xs_daemon_open();
1427     if (state->xenstore == NULL) {
1428         perror("xen: xenstore open");
1429         goto err;
1430     }
1431 
1432     xen_create_ioreq_server(xen_domid, &state->ioservid);
1433 
1434     state->exit.notify = xen_exit_notifier;
1435     qemu_add_exit_notifier(&state->exit);
1436 
1437     state->suspend.notify = xen_suspend_notifier;
1438     qemu_register_suspend_notifier(&state->suspend);
1439 
1440     state->wakeup.notify = xen_wakeup_notifier;
1441     qemu_register_wakeup_notifier(&state->wakeup);
1442 
1443     /*
1444      * Register wake-up support in QMP query-current-machine API
1445      */
1446     qemu_register_wakeup_support();
1447 
1448     rc = xen_map_ioreq_server(state);
1449     if (rc < 0) {
1450         goto err;
1451     }
1452 
1453     rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
1454     if (!rc) {
1455         DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
1456         state->shared_vmport_page =
1457             xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
1458                                  1, &ioreq_pfn, NULL);
1459         if (state->shared_vmport_page == NULL) {
1460             error_report("map shared vmport IO page returned error %d handle=%p",
1461                          errno, xen_xc);
1462             goto err;
1463         }
1464     } else if (rc != -ENOSYS) {
1465         error_report("get vmport regs pfn returned error %d, rc=%d",
1466                      errno, rc);
1467         goto err;
1468     }
1469 
1470     /* Note: cpus is empty at this point in init */
1471     state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);
1472 
1473     rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
1474     if (rc < 0) {
1475         error_report("failed to enable ioreq server info: error %d handle=%p",
1476                      errno, xen_xc);
1477         goto err;
1478     }
1479 
1480     state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus);
1481 
1482     /* FIXME: what happens if we overflow the page here? */
1483     for (i = 0; i < max_cpus; i++) {
1484         rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
1485                                         xen_vcpu_eport(state->shared_page, i));
1486         if (rc == -1) {
1487             error_report("shared evtchn %d bind error %d", i, errno);
1488             goto err;
1489         }
1490         state->ioreq_local_port[i] = rc;
1491     }
1492 
1493     rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
1494                                     state->bufioreq_remote_port);
1495     if (rc == -1) {
1496         error_report("buffered evtchn bind error %d", errno);
1497         goto err;
1498     }
1499     state->bufioreq_local_port = rc;
1500 
1501     /* Init RAM management */
1502 #ifdef XEN_COMPAT_PHYSMAP
1503     xen_map_cache_init(xen_phys_offset_to_gaddr, state);
1504 #else
1505     xen_map_cache_init(NULL, state);
1506 #endif
1507     xen_ram_init(pcms, ms->ram_size, ram_memory);
1508 
1509     qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
1510 
1511     state->memory_listener = xen_memory_listener;
1512     memory_listener_register(&state->memory_listener, &address_space_memory);
1513     state->log_for_dirtybit = NULL;
1514 
1515     state->io_listener = xen_io_listener;
1516     memory_listener_register(&state->io_listener, &address_space_io);
1517 
1518     state->device_listener = xen_device_listener;
1519     QLIST_INIT(&state->dev_list);
1520     device_listener_register(&state->device_listener);
1521 
1522     xen_bus_init();
1523 
1524     /* Initialize backend core & drivers */
1525     if (xen_be_init() != 0) {
1526         error_report("xen backend core setup failed");
1527         goto err;
1528     }
1529     xen_be_register_common();
1530 
1531     QLIST_INIT(&xen_physmap);
1532     xen_read_physmap(state);
1533 
1534     /* Disable ACPI build because Xen handles it */
1535     pcms->acpi_build_enabled = false;
1536 
1537     return;
1538 
1539 err:
1540     error_report("xen hardware virtual machine initialisation failed");
1541     exit(1);
1542 }
1543 
1544 void destroy_hvm_domain(bool reboot)
1545 {
1546     xc_interface *xc_handle;
1547     int sts;
1548     int rc;
1549 
1550     unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;
1551 
1552     if (xen_dmod) {
1553         rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
1554         if (!rc) {
1555             return;
1556         }
1557         if (errno != ENOTTY /* old Xen */) {
1558             perror("xendevicemodel_shutdown failed");
1559         }
1560         /* well, try the old thing then */
1561     }
1562 
1563     xc_handle = xc_interface_open(0, 0, 0);
1564     if (xc_handle == NULL) {
1565         fprintf(stderr, "Cannot acquire xenctrl handle\n");
1566     } else {
1567         sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
1568         if (sts != 0) {
1569             fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
1570                     "sts %d, %s\n", reboot ? "reboot" : "poweroff",
1571                     sts, strerror(errno));
1572         } else {
1573             fprintf(stderr, "Issued domain %d %s\n", xen_domid,
1574                     reboot ? "reboot" : "poweroff");
1575         }
1576         xc_interface_close(xc_handle);
1577     }
1578 }
1579 
1580 void xen_register_framebuffer(MemoryRegion *mr)
1581 {
1582     framebuffer = mr;
1583 }
1584 
1585 void xen_shutdown_fatal_error(const char *fmt, ...)
1586 {
1587     va_list ap;
1588 
1589     va_start(ap, fmt);
1590     vfprintf(stderr, fmt, ap);
1591     va_end(ap);
1592     fprintf(stderr, "Will destroy the domain.\n");
1593     /* destroy the domain */
1594     qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
1595 }
1596 
1597 void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
1598 {
1599     if (unlikely(xen_in_migration)) {
1600         int rc;
1601         ram_addr_t start_pfn, nb_pages;
1602 
1603         start = xen_phys_offset_to_gaddr(start, length);
1604 
1605         if (length == 0) {
1606             length = TARGET_PAGE_SIZE;
1607         }
1608         start_pfn = start >> TARGET_PAGE_BITS;
1609         nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
1610             - start_pfn;
1611         rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
1612         if (rc) {
1613             fprintf(stderr,
1614                     "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
1615                     __func__, start, nb_pages, errno, strerror(errno));
1616         }
1617     }
1618 }
1619 
1620 void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
1621 {
1622     if (enable) {
1623         memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
1624     } else {
1625         memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
1626     }
1627 }
1628