1*93d43e7eSAnthony Xu /* 2*93d43e7eSAnthony Xu * Copyright (C) 2010 Citrix Ltd. 3*93d43e7eSAnthony Xu * 4*93d43e7eSAnthony Xu * This work is licensed under the terms of the GNU GPL, version 2. See 5*93d43e7eSAnthony Xu * the COPYING file in the top-level directory. 6*93d43e7eSAnthony Xu * 7*93d43e7eSAnthony Xu * Contributions after 2012-01-13 are licensed under the terms of the 8*93d43e7eSAnthony Xu * GNU GPL, version 2 or (at your option) any later version. 9*93d43e7eSAnthony Xu */ 10*93d43e7eSAnthony Xu 11*93d43e7eSAnthony Xu #include "qemu/osdep.h" 12*93d43e7eSAnthony Xu 13*93d43e7eSAnthony Xu #include "cpu.h" 14*93d43e7eSAnthony Xu #include "hw/pci/pci.h" 15*93d43e7eSAnthony Xu #include "hw/i386/pc.h" 16*93d43e7eSAnthony Xu #include "hw/i386/apic-msidef.h" 17*93d43e7eSAnthony Xu #include "hw/xen/xen_common.h" 18*93d43e7eSAnthony Xu #include "hw/xen/xen_backend.h" 19*93d43e7eSAnthony Xu #include "qmp-commands.h" 20*93d43e7eSAnthony Xu 21*93d43e7eSAnthony Xu #include "sysemu/char.h" 22*93d43e7eSAnthony Xu #include "qemu/error-report.h" 23*93d43e7eSAnthony Xu #include "qemu/range.h" 24*93d43e7eSAnthony Xu #include "sysemu/xen-mapcache.h" 25*93d43e7eSAnthony Xu #include "trace.h" 26*93d43e7eSAnthony Xu #include "exec/address-spaces.h" 27*93d43e7eSAnthony Xu 28*93d43e7eSAnthony Xu #include <xen/hvm/ioreq.h> 29*93d43e7eSAnthony Xu #include <xen/hvm/params.h> 30*93d43e7eSAnthony Xu #include <xen/hvm/e820.h> 31*93d43e7eSAnthony Xu 32*93d43e7eSAnthony Xu //#define DEBUG_XEN_HVM 33*93d43e7eSAnthony Xu 34*93d43e7eSAnthony Xu #ifdef DEBUG_XEN_HVM 35*93d43e7eSAnthony Xu #define DPRINTF(fmt, ...) \ 36*93d43e7eSAnthony Xu do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0) 37*93d43e7eSAnthony Xu #else 38*93d43e7eSAnthony Xu #define DPRINTF(fmt, ...) \ 39*93d43e7eSAnthony Xu do { } while (0) 40*93d43e7eSAnthony Xu #endif 41*93d43e7eSAnthony Xu 42*93d43e7eSAnthony Xu static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi; 43*93d43e7eSAnthony Xu static MemoryRegion *framebuffer; 44*93d43e7eSAnthony Xu static bool xen_in_migration; 45*93d43e7eSAnthony Xu 46*93d43e7eSAnthony Xu /* Compatibility with older version */ 47*93d43e7eSAnthony Xu 48*93d43e7eSAnthony Xu /* This allows QEMU to build on a system that has Xen 4.5 or earlier 49*93d43e7eSAnthony Xu * installed. This here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h 50*93d43e7eSAnthony Xu * needs to be included before this block and hw/xen/xen_common.h needs to 51*93d43e7eSAnthony Xu * be included before xen/hvm/ioreq.h 52*93d43e7eSAnthony Xu */ 53*93d43e7eSAnthony Xu #ifndef IOREQ_TYPE_VMWARE_PORT 54*93d43e7eSAnthony Xu #define IOREQ_TYPE_VMWARE_PORT 3 55*93d43e7eSAnthony Xu struct vmware_regs { 56*93d43e7eSAnthony Xu uint32_t esi; 57*93d43e7eSAnthony Xu uint32_t edi; 58*93d43e7eSAnthony Xu uint32_t ebx; 59*93d43e7eSAnthony Xu uint32_t ecx; 60*93d43e7eSAnthony Xu uint32_t edx; 61*93d43e7eSAnthony Xu }; 62*93d43e7eSAnthony Xu typedef struct vmware_regs vmware_regs_t; 63*93d43e7eSAnthony Xu 64*93d43e7eSAnthony Xu struct shared_vmport_iopage { 65*93d43e7eSAnthony Xu struct vmware_regs vcpu_vmport_regs[1]; 66*93d43e7eSAnthony Xu }; 67*93d43e7eSAnthony Xu typedef struct shared_vmport_iopage shared_vmport_iopage_t; 68*93d43e7eSAnthony Xu #endif 69*93d43e7eSAnthony Xu 70*93d43e7eSAnthony Xu static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i) 71*93d43e7eSAnthony Xu { 72*93d43e7eSAnthony Xu return shared_page->vcpu_ioreq[i].vp_eport; 73*93d43e7eSAnthony Xu } 74*93d43e7eSAnthony Xu static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu) 75*93d43e7eSAnthony Xu { 76*93d43e7eSAnthony Xu return &shared_page->vcpu_ioreq[vcpu]; 77*93d43e7eSAnthony Xu } 78*93d43e7eSAnthony Xu 79*93d43e7eSAnthony Xu #define BUFFER_IO_MAX_DELAY 100 80*93d43e7eSAnthony Xu 81*93d43e7eSAnthony Xu typedef struct XenPhysmap { 82*93d43e7eSAnthony Xu hwaddr start_addr; 83*93d43e7eSAnthony Xu ram_addr_t size; 84*93d43e7eSAnthony Xu const char *name; 85*93d43e7eSAnthony Xu hwaddr phys_offset; 86*93d43e7eSAnthony Xu 87*93d43e7eSAnthony Xu QLIST_ENTRY(XenPhysmap) list; 88*93d43e7eSAnthony Xu } XenPhysmap; 89*93d43e7eSAnthony Xu 90*93d43e7eSAnthony Xu typedef struct XenIOState { 91*93d43e7eSAnthony Xu ioservid_t ioservid; 92*93d43e7eSAnthony Xu shared_iopage_t *shared_page; 93*93d43e7eSAnthony Xu shared_vmport_iopage_t *shared_vmport_page; 94*93d43e7eSAnthony Xu buffered_iopage_t *buffered_io_page; 95*93d43e7eSAnthony Xu QEMUTimer *buffered_io_timer; 96*93d43e7eSAnthony Xu CPUState **cpu_by_vcpu_id; 97*93d43e7eSAnthony Xu /* the evtchn port for polling the notification, */ 98*93d43e7eSAnthony Xu evtchn_port_t *ioreq_local_port; 99*93d43e7eSAnthony Xu /* evtchn local port for buffered io */ 100*93d43e7eSAnthony Xu evtchn_port_t bufioreq_local_port; 101*93d43e7eSAnthony Xu /* the evtchn fd for polling */ 102*93d43e7eSAnthony Xu xenevtchn_handle *xce_handle; 103*93d43e7eSAnthony Xu /* which vcpu we are serving */ 104*93d43e7eSAnthony Xu int send_vcpu; 105*93d43e7eSAnthony Xu 106*93d43e7eSAnthony Xu struct xs_handle *xenstore; 107*93d43e7eSAnthony Xu MemoryListener memory_listener; 108*93d43e7eSAnthony Xu MemoryListener io_listener; 109*93d43e7eSAnthony Xu DeviceListener device_listener; 110*93d43e7eSAnthony Xu QLIST_HEAD(, XenPhysmap) physmap; 111*93d43e7eSAnthony Xu hwaddr free_phys_offset; 112*93d43e7eSAnthony Xu const XenPhysmap *log_for_dirtybit; 113*93d43e7eSAnthony Xu 114*93d43e7eSAnthony Xu Notifier exit; 115*93d43e7eSAnthony Xu Notifier suspend; 116*93d43e7eSAnthony Xu Notifier wakeup; 117*93d43e7eSAnthony Xu } XenIOState; 118*93d43e7eSAnthony Xu 119*93d43e7eSAnthony Xu /* Xen specific function for piix pci */ 120*93d43e7eSAnthony Xu 121*93d43e7eSAnthony Xu int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) 122*93d43e7eSAnthony Xu { 123*93d43e7eSAnthony Xu return irq_num + ((pci_dev->devfn >> 3) << 2); 124*93d43e7eSAnthony Xu } 125*93d43e7eSAnthony Xu 126*93d43e7eSAnthony Xu void xen_piix3_set_irq(void *opaque, int irq_num, int level) 127*93d43e7eSAnthony Xu { 128*93d43e7eSAnthony Xu xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2, 129*93d43e7eSAnthony Xu irq_num & 3, level); 130*93d43e7eSAnthony Xu } 131*93d43e7eSAnthony Xu 132*93d43e7eSAnthony Xu void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) 133*93d43e7eSAnthony Xu { 134*93d43e7eSAnthony Xu int i; 135*93d43e7eSAnthony Xu 136*93d43e7eSAnthony Xu /* Scan for updates to PCI link routes (0x60-0x63). */ 137*93d43e7eSAnthony Xu for (i = 0; i < len; i++) { 138*93d43e7eSAnthony Xu uint8_t v = (val >> (8 * i)) & 0xff; 139*93d43e7eSAnthony Xu if (v & 0x80) { 140*93d43e7eSAnthony Xu v = 0; 141*93d43e7eSAnthony Xu } 142*93d43e7eSAnthony Xu v &= 0xf; 143*93d43e7eSAnthony Xu if (((address + i) >= 0x60) && ((address + i) <= 0x63)) { 144*93d43e7eSAnthony Xu xen_set_pci_link_route(xen_domid, address + i - 0x60, v); 145*93d43e7eSAnthony Xu } 146*93d43e7eSAnthony Xu } 147*93d43e7eSAnthony Xu } 148*93d43e7eSAnthony Xu 149*93d43e7eSAnthony Xu int xen_is_pirq_msi(uint32_t msi_data) 150*93d43e7eSAnthony Xu { 151*93d43e7eSAnthony Xu /* If vector is 0, the msi is remapped into a pirq, passed as 152*93d43e7eSAnthony Xu * dest_id. 153*93d43e7eSAnthony Xu */ 154*93d43e7eSAnthony Xu return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0; 155*93d43e7eSAnthony Xu } 156*93d43e7eSAnthony Xu 157*93d43e7eSAnthony Xu void xen_hvm_inject_msi(uint64_t addr, uint32_t data) 158*93d43e7eSAnthony Xu { 159*93d43e7eSAnthony Xu xen_inject_msi(xen_domid, addr, data); 160*93d43e7eSAnthony Xu } 161*93d43e7eSAnthony Xu 162*93d43e7eSAnthony Xu static void xen_suspend_notifier(Notifier *notifier, void *data) 163*93d43e7eSAnthony Xu { 164*93d43e7eSAnthony Xu xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3); 165*93d43e7eSAnthony Xu } 166*93d43e7eSAnthony Xu 167*93d43e7eSAnthony Xu /* Xen Interrupt Controller */ 168*93d43e7eSAnthony Xu 169*93d43e7eSAnthony Xu static void xen_set_irq(void *opaque, int irq, int level) 170*93d43e7eSAnthony Xu { 171*93d43e7eSAnthony Xu xen_set_isa_irq_level(xen_domid, irq, level); 172*93d43e7eSAnthony Xu } 173*93d43e7eSAnthony Xu 174*93d43e7eSAnthony Xu qemu_irq *xen_interrupt_controller_init(void) 175*93d43e7eSAnthony Xu { 176*93d43e7eSAnthony Xu return qemu_allocate_irqs(xen_set_irq, NULL, 16); 177*93d43e7eSAnthony Xu } 178*93d43e7eSAnthony Xu 179*93d43e7eSAnthony Xu /* Memory Ops */ 180*93d43e7eSAnthony Xu 181*93d43e7eSAnthony Xu static void xen_ram_init(PCMachineState *pcms, 182*93d43e7eSAnthony Xu ram_addr_t ram_size, MemoryRegion **ram_memory_p) 183*93d43e7eSAnthony Xu { 184*93d43e7eSAnthony Xu MemoryRegion *sysmem = get_system_memory(); 185*93d43e7eSAnthony Xu ram_addr_t block_len; 186*93d43e7eSAnthony Xu uint64_t user_lowmem = object_property_get_int(qdev_get_machine(), 187*93d43e7eSAnthony Xu PC_MACHINE_MAX_RAM_BELOW_4G, 188*93d43e7eSAnthony Xu &error_abort); 189*93d43e7eSAnthony Xu 190*93d43e7eSAnthony Xu /* Handle the machine opt max-ram-below-4g. It is basically doing 191*93d43e7eSAnthony Xu * min(xen limit, user limit). 192*93d43e7eSAnthony Xu */ 193*93d43e7eSAnthony Xu if (!user_lowmem) { 194*93d43e7eSAnthony Xu user_lowmem = HVM_BELOW_4G_RAM_END; /* default */ 195*93d43e7eSAnthony Xu } 196*93d43e7eSAnthony Xu if (HVM_BELOW_4G_RAM_END <= user_lowmem) { 197*93d43e7eSAnthony Xu user_lowmem = HVM_BELOW_4G_RAM_END; 198*93d43e7eSAnthony Xu } 199*93d43e7eSAnthony Xu 200*93d43e7eSAnthony Xu if (ram_size >= user_lowmem) { 201*93d43e7eSAnthony Xu pcms->above_4g_mem_size = ram_size - user_lowmem; 202*93d43e7eSAnthony Xu pcms->below_4g_mem_size = user_lowmem; 203*93d43e7eSAnthony Xu } else { 204*93d43e7eSAnthony Xu pcms->above_4g_mem_size = 0; 205*93d43e7eSAnthony Xu pcms->below_4g_mem_size = ram_size; 206*93d43e7eSAnthony Xu } 207*93d43e7eSAnthony Xu if (!pcms->above_4g_mem_size) { 208*93d43e7eSAnthony Xu block_len = ram_size; 209*93d43e7eSAnthony Xu } else { 210*93d43e7eSAnthony Xu /* 211*93d43e7eSAnthony Xu * Xen does not allocate the memory continuously, it keeps a 212*93d43e7eSAnthony Xu * hole of the size computed above or passed in. 213*93d43e7eSAnthony Xu */ 214*93d43e7eSAnthony Xu block_len = (1ULL << 32) + pcms->above_4g_mem_size; 215*93d43e7eSAnthony Xu } 216*93d43e7eSAnthony Xu memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len, 217*93d43e7eSAnthony Xu &error_fatal); 218*93d43e7eSAnthony Xu *ram_memory_p = &ram_memory; 219*93d43e7eSAnthony Xu vmstate_register_ram_global(&ram_memory); 220*93d43e7eSAnthony Xu 221*93d43e7eSAnthony Xu memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k", 222*93d43e7eSAnthony Xu &ram_memory, 0, 0xa0000); 223*93d43e7eSAnthony Xu memory_region_add_subregion(sysmem, 0, &ram_640k); 224*93d43e7eSAnthony Xu /* Skip of the VGA IO memory space, it will be registered later by the VGA 225*93d43e7eSAnthony Xu * emulated device. 226*93d43e7eSAnthony Xu * 227*93d43e7eSAnthony Xu * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load 228*93d43e7eSAnthony Xu * the Options ROM, so it is registered here as RAM. 229*93d43e7eSAnthony Xu */ 230*93d43e7eSAnthony Xu memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", 231*93d43e7eSAnthony Xu &ram_memory, 0xc0000, 232*93d43e7eSAnthony Xu pcms->below_4g_mem_size - 0xc0000); 233*93d43e7eSAnthony Xu memory_region_add_subregion(sysmem, 0xc0000, &ram_lo); 234*93d43e7eSAnthony Xu if (pcms->above_4g_mem_size > 0) { 235*93d43e7eSAnthony Xu memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", 236*93d43e7eSAnthony Xu &ram_memory, 0x100000000ULL, 237*93d43e7eSAnthony Xu pcms->above_4g_mem_size); 238*93d43e7eSAnthony Xu memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi); 239*93d43e7eSAnthony Xu } 240*93d43e7eSAnthony Xu } 241*93d43e7eSAnthony Xu 242*93d43e7eSAnthony Xu void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, 243*93d43e7eSAnthony Xu Error **errp) 244*93d43e7eSAnthony Xu { 245*93d43e7eSAnthony Xu unsigned long nr_pfn; 246*93d43e7eSAnthony Xu xen_pfn_t *pfn_list; 247*93d43e7eSAnthony Xu int i; 248*93d43e7eSAnthony Xu 249*93d43e7eSAnthony Xu if (runstate_check(RUN_STATE_INMIGRATE)) { 250*93d43e7eSAnthony Xu /* RAM already populated in Xen */ 251*93d43e7eSAnthony Xu fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT 252*93d43e7eSAnthony Xu " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n", 253*93d43e7eSAnthony Xu __func__, size, ram_addr); 254*93d43e7eSAnthony Xu return; 255*93d43e7eSAnthony Xu } 256*93d43e7eSAnthony Xu 257*93d43e7eSAnthony Xu if (mr == &ram_memory) { 258*93d43e7eSAnthony Xu return; 259*93d43e7eSAnthony Xu } 260*93d43e7eSAnthony Xu 261*93d43e7eSAnthony Xu trace_xen_ram_alloc(ram_addr, size); 262*93d43e7eSAnthony Xu 263*93d43e7eSAnthony Xu nr_pfn = size >> TARGET_PAGE_BITS; 264*93d43e7eSAnthony Xu pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn); 265*93d43e7eSAnthony Xu 266*93d43e7eSAnthony Xu for (i = 0; i < nr_pfn; i++) { 267*93d43e7eSAnthony Xu pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i; 268*93d43e7eSAnthony Xu } 269*93d43e7eSAnthony Xu 270*93d43e7eSAnthony Xu if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) { 271*93d43e7eSAnthony Xu error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT, 272*93d43e7eSAnthony Xu ram_addr); 273*93d43e7eSAnthony Xu } 274*93d43e7eSAnthony Xu 275*93d43e7eSAnthony Xu g_free(pfn_list); 276*93d43e7eSAnthony Xu } 277*93d43e7eSAnthony Xu 278*93d43e7eSAnthony Xu static XenPhysmap *get_physmapping(XenIOState *state, 279*93d43e7eSAnthony Xu hwaddr start_addr, ram_addr_t size) 280*93d43e7eSAnthony Xu { 281*93d43e7eSAnthony Xu XenPhysmap *physmap = NULL; 282*93d43e7eSAnthony Xu 283*93d43e7eSAnthony Xu start_addr &= TARGET_PAGE_MASK; 284*93d43e7eSAnthony Xu 285*93d43e7eSAnthony Xu QLIST_FOREACH(physmap, &state->physmap, list) { 286*93d43e7eSAnthony Xu if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) { 287*93d43e7eSAnthony Xu return physmap; 288*93d43e7eSAnthony Xu } 289*93d43e7eSAnthony Xu } 290*93d43e7eSAnthony Xu return NULL; 291*93d43e7eSAnthony Xu } 292*93d43e7eSAnthony Xu 293*93d43e7eSAnthony Xu static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr, 294*93d43e7eSAnthony Xu ram_addr_t size, void *opaque) 295*93d43e7eSAnthony Xu { 296*93d43e7eSAnthony Xu hwaddr addr = start_addr & TARGET_PAGE_MASK; 297*93d43e7eSAnthony Xu XenIOState *xen_io_state = opaque; 298*93d43e7eSAnthony Xu XenPhysmap *physmap = NULL; 299*93d43e7eSAnthony Xu 300*93d43e7eSAnthony Xu QLIST_FOREACH(physmap, &xen_io_state->physmap, list) { 301*93d43e7eSAnthony Xu if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) { 302*93d43e7eSAnthony Xu return physmap->start_addr; 303*93d43e7eSAnthony Xu } 304*93d43e7eSAnthony Xu } 305*93d43e7eSAnthony Xu 306*93d43e7eSAnthony Xu return start_addr; 307*93d43e7eSAnthony Xu } 308*93d43e7eSAnthony Xu 309*93d43e7eSAnthony Xu static int xen_add_to_physmap(XenIOState *state, 310*93d43e7eSAnthony Xu hwaddr start_addr, 311*93d43e7eSAnthony Xu ram_addr_t size, 312*93d43e7eSAnthony Xu MemoryRegion *mr, 313*93d43e7eSAnthony Xu hwaddr offset_within_region) 314*93d43e7eSAnthony Xu { 315*93d43e7eSAnthony Xu unsigned long i = 0; 316*93d43e7eSAnthony Xu int rc = 0; 317*93d43e7eSAnthony Xu XenPhysmap *physmap = NULL; 318*93d43e7eSAnthony Xu hwaddr pfn, start_gpfn; 319*93d43e7eSAnthony Xu hwaddr phys_offset = memory_region_get_ram_addr(mr); 320*93d43e7eSAnthony Xu char path[80], value[17]; 321*93d43e7eSAnthony Xu const char *mr_name; 322*93d43e7eSAnthony Xu 323*93d43e7eSAnthony Xu if (get_physmapping(state, start_addr, size)) { 324*93d43e7eSAnthony Xu return 0; 325*93d43e7eSAnthony Xu } 326*93d43e7eSAnthony Xu if (size <= 0) { 327*93d43e7eSAnthony Xu return -1; 328*93d43e7eSAnthony Xu } 329*93d43e7eSAnthony Xu 330*93d43e7eSAnthony Xu /* Xen can only handle a single dirty log region for now and we want 331*93d43e7eSAnthony Xu * the linear framebuffer to be that region. 332*93d43e7eSAnthony Xu * Avoid tracking any regions that is not videoram and avoid tracking 333*93d43e7eSAnthony Xu * the legacy vga region. */ 334*93d43e7eSAnthony Xu if (mr == framebuffer && start_addr > 0xbffff) { 335*93d43e7eSAnthony Xu goto go_physmap; 336*93d43e7eSAnthony Xu } 337*93d43e7eSAnthony Xu return -1; 338*93d43e7eSAnthony Xu 339*93d43e7eSAnthony Xu go_physmap: 340*93d43e7eSAnthony Xu DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", 341*93d43e7eSAnthony Xu start_addr, start_addr + size); 342*93d43e7eSAnthony Xu 343*93d43e7eSAnthony Xu pfn = phys_offset >> TARGET_PAGE_BITS; 344*93d43e7eSAnthony Xu start_gpfn = start_addr >> TARGET_PAGE_BITS; 345*93d43e7eSAnthony Xu for (i = 0; i < size >> TARGET_PAGE_BITS; i++) { 346*93d43e7eSAnthony Xu unsigned long idx = pfn + i; 347*93d43e7eSAnthony Xu xen_pfn_t gpfn = start_gpfn + i; 348*93d43e7eSAnthony Xu 349*93d43e7eSAnthony Xu rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn); 350*93d43e7eSAnthony Xu if (rc) { 351*93d43e7eSAnthony Xu DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %" 352*93d43e7eSAnthony Xu PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno); 353*93d43e7eSAnthony Xu return -rc; 354*93d43e7eSAnthony Xu } 355*93d43e7eSAnthony Xu } 356*93d43e7eSAnthony Xu 357*93d43e7eSAnthony Xu mr_name = memory_region_name(mr); 358*93d43e7eSAnthony Xu 359*93d43e7eSAnthony Xu physmap = g_malloc(sizeof (XenPhysmap)); 360*93d43e7eSAnthony Xu 361*93d43e7eSAnthony Xu physmap->start_addr = start_addr; 362*93d43e7eSAnthony Xu physmap->size = size; 363*93d43e7eSAnthony Xu physmap->name = mr_name; 364*93d43e7eSAnthony Xu physmap->phys_offset = phys_offset; 365*93d43e7eSAnthony Xu 366*93d43e7eSAnthony Xu QLIST_INSERT_HEAD(&state->physmap, physmap, list); 367*93d43e7eSAnthony Xu 368*93d43e7eSAnthony Xu xc_domain_pin_memory_cacheattr(xen_xc, xen_domid, 369*93d43e7eSAnthony Xu start_addr >> TARGET_PAGE_BITS, 370*93d43e7eSAnthony Xu (start_addr + size - 1) >> TARGET_PAGE_BITS, 371*93d43e7eSAnthony Xu XEN_DOMCTL_MEM_CACHEATTR_WB); 372*93d43e7eSAnthony Xu 373*93d43e7eSAnthony Xu snprintf(path, sizeof(path), 374*93d43e7eSAnthony Xu "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr", 375*93d43e7eSAnthony Xu xen_domid, (uint64_t)phys_offset); 376*93d43e7eSAnthony Xu snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr); 377*93d43e7eSAnthony Xu if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { 378*93d43e7eSAnthony Xu return -1; 379*93d43e7eSAnthony Xu } 380*93d43e7eSAnthony Xu snprintf(path, sizeof(path), 381*93d43e7eSAnthony Xu "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size", 382*93d43e7eSAnthony Xu xen_domid, (uint64_t)phys_offset); 383*93d43e7eSAnthony Xu snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size); 384*93d43e7eSAnthony Xu if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { 385*93d43e7eSAnthony Xu return -1; 386*93d43e7eSAnthony Xu } 387*93d43e7eSAnthony Xu if (mr_name) { 388*93d43e7eSAnthony Xu snprintf(path, sizeof(path), 389*93d43e7eSAnthony Xu "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name", 390*93d43e7eSAnthony Xu xen_domid, (uint64_t)phys_offset); 391*93d43e7eSAnthony Xu if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) { 392*93d43e7eSAnthony Xu return -1; 393*93d43e7eSAnthony Xu } 394*93d43e7eSAnthony Xu } 395*93d43e7eSAnthony Xu 396*93d43e7eSAnthony Xu return 0; 397*93d43e7eSAnthony Xu } 398*93d43e7eSAnthony Xu 399*93d43e7eSAnthony Xu static int xen_remove_from_physmap(XenIOState *state, 400*93d43e7eSAnthony Xu hwaddr start_addr, 401*93d43e7eSAnthony Xu ram_addr_t size) 402*93d43e7eSAnthony Xu { 403*93d43e7eSAnthony Xu unsigned long i = 0; 404*93d43e7eSAnthony Xu int rc = 0; 405*93d43e7eSAnthony Xu XenPhysmap *physmap = NULL; 406*93d43e7eSAnthony Xu hwaddr phys_offset = 0; 407*93d43e7eSAnthony Xu 408*93d43e7eSAnthony Xu physmap = get_physmapping(state, start_addr, size); 409*93d43e7eSAnthony Xu if (physmap == NULL) { 410*93d43e7eSAnthony Xu return -1; 411*93d43e7eSAnthony Xu } 412*93d43e7eSAnthony Xu 413*93d43e7eSAnthony Xu phys_offset = physmap->phys_offset; 414*93d43e7eSAnthony Xu size = physmap->size; 415*93d43e7eSAnthony Xu 416*93d43e7eSAnthony Xu DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at " 417*93d43e7eSAnthony Xu "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset); 418*93d43e7eSAnthony Xu 419*93d43e7eSAnthony Xu size >>= TARGET_PAGE_BITS; 420*93d43e7eSAnthony Xu start_addr >>= TARGET_PAGE_BITS; 421*93d43e7eSAnthony Xu phys_offset >>= TARGET_PAGE_BITS; 422*93d43e7eSAnthony Xu for (i = 0; i < size; i++) { 423*93d43e7eSAnthony Xu xen_pfn_t idx = start_addr + i; 424*93d43e7eSAnthony Xu xen_pfn_t gpfn = phys_offset + i; 425*93d43e7eSAnthony Xu 426*93d43e7eSAnthony Xu rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn); 427*93d43e7eSAnthony Xu if (rc) { 428*93d43e7eSAnthony Xu fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %" 429*93d43e7eSAnthony Xu PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno); 430*93d43e7eSAnthony Xu return -rc; 431*93d43e7eSAnthony Xu } 432*93d43e7eSAnthony Xu } 433*93d43e7eSAnthony Xu 434*93d43e7eSAnthony Xu QLIST_REMOVE(physmap, list); 435*93d43e7eSAnthony Xu if (state->log_for_dirtybit == physmap) { 436*93d43e7eSAnthony Xu state->log_for_dirtybit = NULL; 437*93d43e7eSAnthony Xu } 438*93d43e7eSAnthony Xu g_free(physmap); 439*93d43e7eSAnthony Xu 440*93d43e7eSAnthony Xu return 0; 441*93d43e7eSAnthony Xu } 442*93d43e7eSAnthony Xu 443*93d43e7eSAnthony Xu static void xen_set_memory(struct MemoryListener *listener, 444*93d43e7eSAnthony Xu MemoryRegionSection *section, 445*93d43e7eSAnthony Xu bool add) 446*93d43e7eSAnthony Xu { 447*93d43e7eSAnthony Xu XenIOState *state = container_of(listener, XenIOState, memory_listener); 448*93d43e7eSAnthony Xu hwaddr start_addr = section->offset_within_address_space; 449*93d43e7eSAnthony Xu ram_addr_t size = int128_get64(section->size); 450*93d43e7eSAnthony Xu bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA); 451*93d43e7eSAnthony Xu hvmmem_type_t mem_type; 452*93d43e7eSAnthony Xu 453*93d43e7eSAnthony Xu if (section->mr == &ram_memory) { 454*93d43e7eSAnthony Xu return; 455*93d43e7eSAnthony Xu } else { 456*93d43e7eSAnthony Xu if (add) { 457*93d43e7eSAnthony Xu xen_map_memory_section(xen_domid, state->ioservid, 458*93d43e7eSAnthony Xu section); 459*93d43e7eSAnthony Xu } else { 460*93d43e7eSAnthony Xu xen_unmap_memory_section(xen_domid, state->ioservid, 461*93d43e7eSAnthony Xu section); 462*93d43e7eSAnthony Xu } 463*93d43e7eSAnthony Xu } 464*93d43e7eSAnthony Xu 465*93d43e7eSAnthony Xu if (!memory_region_is_ram(section->mr)) { 466*93d43e7eSAnthony Xu return; 467*93d43e7eSAnthony Xu } 468*93d43e7eSAnthony Xu 469*93d43e7eSAnthony Xu if (log_dirty != add) { 470*93d43e7eSAnthony Xu return; 471*93d43e7eSAnthony Xu } 472*93d43e7eSAnthony Xu 473*93d43e7eSAnthony Xu trace_xen_client_set_memory(start_addr, size, log_dirty); 474*93d43e7eSAnthony Xu 475*93d43e7eSAnthony Xu start_addr &= TARGET_PAGE_MASK; 476*93d43e7eSAnthony Xu size = TARGET_PAGE_ALIGN(size); 477*93d43e7eSAnthony Xu 478*93d43e7eSAnthony Xu if (add) { 479*93d43e7eSAnthony Xu if (!memory_region_is_rom(section->mr)) { 480*93d43e7eSAnthony Xu xen_add_to_physmap(state, start_addr, size, 481*93d43e7eSAnthony Xu section->mr, section->offset_within_region); 482*93d43e7eSAnthony Xu } else { 483*93d43e7eSAnthony Xu mem_type = HVMMEM_ram_ro; 484*93d43e7eSAnthony Xu if (xen_set_mem_type(xen_domid, mem_type, 485*93d43e7eSAnthony Xu start_addr >> TARGET_PAGE_BITS, 486*93d43e7eSAnthony Xu size >> TARGET_PAGE_BITS)) { 487*93d43e7eSAnthony Xu DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n", 488*93d43e7eSAnthony Xu start_addr); 489*93d43e7eSAnthony Xu } 490*93d43e7eSAnthony Xu } 491*93d43e7eSAnthony Xu } else { 492*93d43e7eSAnthony Xu if (xen_remove_from_physmap(state, start_addr, size) < 0) { 493*93d43e7eSAnthony Xu DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr); 494*93d43e7eSAnthony Xu } 495*93d43e7eSAnthony Xu } 496*93d43e7eSAnthony Xu } 497*93d43e7eSAnthony Xu 498*93d43e7eSAnthony Xu static void xen_region_add(MemoryListener *listener, 499*93d43e7eSAnthony Xu MemoryRegionSection *section) 500*93d43e7eSAnthony Xu { 501*93d43e7eSAnthony Xu memory_region_ref(section->mr); 502*93d43e7eSAnthony Xu xen_set_memory(listener, section, true); 503*93d43e7eSAnthony Xu } 504*93d43e7eSAnthony Xu 505*93d43e7eSAnthony Xu static void xen_region_del(MemoryListener *listener, 506*93d43e7eSAnthony Xu MemoryRegionSection *section) 507*93d43e7eSAnthony Xu { 508*93d43e7eSAnthony Xu xen_set_memory(listener, section, false); 509*93d43e7eSAnthony Xu memory_region_unref(section->mr); 510*93d43e7eSAnthony Xu } 511*93d43e7eSAnthony Xu 512*93d43e7eSAnthony Xu static void xen_io_add(MemoryListener *listener, 513*93d43e7eSAnthony Xu MemoryRegionSection *section) 514*93d43e7eSAnthony Xu { 515*93d43e7eSAnthony Xu XenIOState *state = container_of(listener, XenIOState, io_listener); 516*93d43e7eSAnthony Xu MemoryRegion *mr = section->mr; 517*93d43e7eSAnthony Xu 518*93d43e7eSAnthony Xu if (mr->ops == &unassigned_io_ops) { 519*93d43e7eSAnthony Xu return; 520*93d43e7eSAnthony Xu } 521*93d43e7eSAnthony Xu 522*93d43e7eSAnthony Xu memory_region_ref(mr); 523*93d43e7eSAnthony Xu 524*93d43e7eSAnthony Xu xen_map_io_section(xen_domid, state->ioservid, section); 525*93d43e7eSAnthony Xu } 526*93d43e7eSAnthony Xu 527*93d43e7eSAnthony Xu static void xen_io_del(MemoryListener *listener, 528*93d43e7eSAnthony Xu MemoryRegionSection *section) 529*93d43e7eSAnthony Xu { 530*93d43e7eSAnthony Xu XenIOState *state = container_of(listener, XenIOState, io_listener); 531*93d43e7eSAnthony Xu MemoryRegion *mr = section->mr; 532*93d43e7eSAnthony Xu 533*93d43e7eSAnthony Xu if (mr->ops == &unassigned_io_ops) { 534*93d43e7eSAnthony Xu return; 535*93d43e7eSAnthony Xu } 536*93d43e7eSAnthony Xu 537*93d43e7eSAnthony Xu xen_unmap_io_section(xen_domid, state->ioservid, section); 538*93d43e7eSAnthony Xu 539*93d43e7eSAnthony Xu memory_region_unref(mr); 540*93d43e7eSAnthony Xu } 541*93d43e7eSAnthony Xu 542*93d43e7eSAnthony Xu static void xen_device_realize(DeviceListener *listener, 543*93d43e7eSAnthony Xu DeviceState *dev) 544*93d43e7eSAnthony Xu { 545*93d43e7eSAnthony Xu XenIOState *state = container_of(listener, XenIOState, device_listener); 546*93d43e7eSAnthony Xu 547*93d43e7eSAnthony Xu if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { 548*93d43e7eSAnthony Xu PCIDevice *pci_dev = PCI_DEVICE(dev); 549*93d43e7eSAnthony Xu 550*93d43e7eSAnthony Xu xen_map_pcidev(xen_domid, state->ioservid, pci_dev); 551*93d43e7eSAnthony Xu } 552*93d43e7eSAnthony Xu } 553*93d43e7eSAnthony Xu 554*93d43e7eSAnthony Xu static void xen_device_unrealize(DeviceListener *listener, 555*93d43e7eSAnthony Xu DeviceState *dev) 556*93d43e7eSAnthony Xu { 557*93d43e7eSAnthony Xu XenIOState *state = container_of(listener, XenIOState, device_listener); 558*93d43e7eSAnthony Xu 559*93d43e7eSAnthony Xu if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { 560*93d43e7eSAnthony Xu PCIDevice *pci_dev = PCI_DEVICE(dev); 561*93d43e7eSAnthony Xu 562*93d43e7eSAnthony Xu xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev); 563*93d43e7eSAnthony Xu } 564*93d43e7eSAnthony Xu } 565*93d43e7eSAnthony Xu 566*93d43e7eSAnthony Xu static void xen_sync_dirty_bitmap(XenIOState *state, 567*93d43e7eSAnthony Xu hwaddr start_addr, 568*93d43e7eSAnthony Xu ram_addr_t size) 569*93d43e7eSAnthony Xu { 570*93d43e7eSAnthony Xu hwaddr npages = size >> TARGET_PAGE_BITS; 571*93d43e7eSAnthony Xu const int width = sizeof(unsigned long) * 8; 572*93d43e7eSAnthony Xu unsigned long bitmap[DIV_ROUND_UP(npages, width)]; 573*93d43e7eSAnthony Xu int rc, i, j; 574*93d43e7eSAnthony Xu const XenPhysmap *physmap = NULL; 575*93d43e7eSAnthony Xu 576*93d43e7eSAnthony Xu physmap = get_physmapping(state, start_addr, size); 577*93d43e7eSAnthony Xu if (physmap == NULL) { 578*93d43e7eSAnthony Xu /* not handled */ 579*93d43e7eSAnthony Xu return; 580*93d43e7eSAnthony Xu } 581*93d43e7eSAnthony Xu 582*93d43e7eSAnthony Xu if (state->log_for_dirtybit == NULL) { 583*93d43e7eSAnthony Xu state->log_for_dirtybit = physmap; 584*93d43e7eSAnthony Xu } else if (state->log_for_dirtybit != physmap) { 585*93d43e7eSAnthony Xu /* Only one range for dirty bitmap can be tracked. */ 586*93d43e7eSAnthony Xu return; 587*93d43e7eSAnthony Xu } 588*93d43e7eSAnthony Xu 589*93d43e7eSAnthony Xu rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS, 590*93d43e7eSAnthony Xu npages, bitmap); 591*93d43e7eSAnthony Xu if (rc < 0) { 592*93d43e7eSAnthony Xu #ifndef ENODATA 593*93d43e7eSAnthony Xu #define ENODATA ENOENT 594*93d43e7eSAnthony Xu #endif 595*93d43e7eSAnthony Xu if (errno == ENODATA) { 596*93d43e7eSAnthony Xu memory_region_set_dirty(framebuffer, 0, size); 597*93d43e7eSAnthony Xu DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx 598*93d43e7eSAnthony Xu ", 0x" TARGET_FMT_plx "): %s\n", 599*93d43e7eSAnthony Xu start_addr, start_addr + size, strerror(errno)); 600*93d43e7eSAnthony Xu } 601*93d43e7eSAnthony Xu return; 602*93d43e7eSAnthony Xu } 603*93d43e7eSAnthony Xu 604*93d43e7eSAnthony Xu for (i = 0; i < ARRAY_SIZE(bitmap); i++) { 605*93d43e7eSAnthony Xu unsigned long map = bitmap[i]; 606*93d43e7eSAnthony Xu while (map != 0) { 607*93d43e7eSAnthony Xu j = ctzl(map); 608*93d43e7eSAnthony Xu map &= ~(1ul << j); 609*93d43e7eSAnthony Xu memory_region_set_dirty(framebuffer, 610*93d43e7eSAnthony Xu (i * width + j) * TARGET_PAGE_SIZE, 611*93d43e7eSAnthony Xu TARGET_PAGE_SIZE); 612*93d43e7eSAnthony Xu }; 613*93d43e7eSAnthony Xu } 614*93d43e7eSAnthony Xu } 615*93d43e7eSAnthony Xu 616*93d43e7eSAnthony Xu static void xen_log_start(MemoryListener *listener, 617*93d43e7eSAnthony Xu MemoryRegionSection *section, 618*93d43e7eSAnthony Xu int old, int new) 619*93d43e7eSAnthony Xu { 620*93d43e7eSAnthony Xu XenIOState *state = container_of(listener, XenIOState, memory_listener); 621*93d43e7eSAnthony Xu 622*93d43e7eSAnthony Xu if (new & ~old & (1 << DIRTY_MEMORY_VGA)) { 623*93d43e7eSAnthony Xu xen_sync_dirty_bitmap(state, section->offset_within_address_space, 624*93d43e7eSAnthony Xu int128_get64(section->size)); 625*93d43e7eSAnthony Xu } 626*93d43e7eSAnthony Xu } 627*93d43e7eSAnthony Xu 628*93d43e7eSAnthony Xu static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section, 629*93d43e7eSAnthony Xu int old, int new) 630*93d43e7eSAnthony Xu { 631*93d43e7eSAnthony Xu XenIOState *state = container_of(listener, XenIOState, memory_listener); 632*93d43e7eSAnthony Xu 633*93d43e7eSAnthony Xu if (old & ~new & (1 << DIRTY_MEMORY_VGA)) { 634*93d43e7eSAnthony Xu state->log_for_dirtybit = NULL; 635*93d43e7eSAnthony Xu /* Disable dirty bit tracking */ 636*93d43e7eSAnthony Xu xen_track_dirty_vram(xen_domid, 0, 0, NULL); 637*93d43e7eSAnthony Xu } 638*93d43e7eSAnthony Xu } 639*93d43e7eSAnthony Xu 640*93d43e7eSAnthony Xu static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section) 641*93d43e7eSAnthony Xu { 642*93d43e7eSAnthony Xu XenIOState *state = container_of(listener, XenIOState, memory_listener); 643*93d43e7eSAnthony Xu 644*93d43e7eSAnthony Xu xen_sync_dirty_bitmap(state, section->offset_within_address_space, 645*93d43e7eSAnthony Xu int128_get64(section->size)); 646*93d43e7eSAnthony Xu } 647*93d43e7eSAnthony Xu 648*93d43e7eSAnthony Xu static void xen_log_global_start(MemoryListener *listener) 649*93d43e7eSAnthony Xu { 650*93d43e7eSAnthony Xu if (xen_enabled()) { 651*93d43e7eSAnthony Xu xen_in_migration = true; 652*93d43e7eSAnthony Xu } 653*93d43e7eSAnthony Xu } 654*93d43e7eSAnthony Xu 655*93d43e7eSAnthony Xu static void xen_log_global_stop(MemoryListener *listener) 656*93d43e7eSAnthony Xu { 657*93d43e7eSAnthony Xu xen_in_migration = false; 658*93d43e7eSAnthony Xu } 659*93d43e7eSAnthony Xu 660*93d43e7eSAnthony Xu static MemoryListener xen_memory_listener = { 661*93d43e7eSAnthony Xu .region_add = xen_region_add, 662*93d43e7eSAnthony Xu .region_del = xen_region_del, 663*93d43e7eSAnthony Xu .log_start = xen_log_start, 664*93d43e7eSAnthony Xu .log_stop = xen_log_stop, 665*93d43e7eSAnthony Xu .log_sync = xen_log_sync, 666*93d43e7eSAnthony Xu .log_global_start = xen_log_global_start, 667*93d43e7eSAnthony Xu .log_global_stop = xen_log_global_stop, 668*93d43e7eSAnthony Xu .priority = 10, 669*93d43e7eSAnthony Xu }; 670*93d43e7eSAnthony Xu 671*93d43e7eSAnthony Xu static MemoryListener xen_io_listener = { 672*93d43e7eSAnthony Xu .region_add = xen_io_add, 673*93d43e7eSAnthony Xu .region_del = xen_io_del, 674*93d43e7eSAnthony Xu .priority = 10, 675*93d43e7eSAnthony Xu }; 676*93d43e7eSAnthony Xu 677*93d43e7eSAnthony Xu static DeviceListener xen_device_listener = { 678*93d43e7eSAnthony Xu .realize = xen_device_realize, 679*93d43e7eSAnthony Xu .unrealize = xen_device_unrealize, 680*93d43e7eSAnthony Xu }; 681*93d43e7eSAnthony Xu 682*93d43e7eSAnthony Xu /* get the ioreq packets from share mem */ 683*93d43e7eSAnthony Xu static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu) 684*93d43e7eSAnthony Xu { 685*93d43e7eSAnthony Xu ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu); 686*93d43e7eSAnthony Xu 687*93d43e7eSAnthony Xu if (req->state != STATE_IOREQ_READY) { 688*93d43e7eSAnthony Xu DPRINTF("I/O request not ready: " 689*93d43e7eSAnthony Xu "%x, ptr: %x, port: %"PRIx64", " 690*93d43e7eSAnthony Xu "data: %"PRIx64", count: %u, size: %u\n", 691*93d43e7eSAnthony Xu req->state, req->data_is_ptr, req->addr, 692*93d43e7eSAnthony Xu req->data, req->count, req->size); 693*93d43e7eSAnthony Xu return NULL; 694*93d43e7eSAnthony Xu } 695*93d43e7eSAnthony Xu 696*93d43e7eSAnthony Xu xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */ 697*93d43e7eSAnthony Xu 698*93d43e7eSAnthony Xu req->state = STATE_IOREQ_INPROCESS; 699*93d43e7eSAnthony Xu return req; 700*93d43e7eSAnthony Xu } 701*93d43e7eSAnthony Xu 702*93d43e7eSAnthony Xu /* use poll to get the port notification */ 703*93d43e7eSAnthony Xu /* ioreq_vec--out,the */ 704*93d43e7eSAnthony Xu /* retval--the number of ioreq packet */ 705*93d43e7eSAnthony Xu static ioreq_t *cpu_get_ioreq(XenIOState *state) 706*93d43e7eSAnthony Xu { 707*93d43e7eSAnthony Xu int i; 708*93d43e7eSAnthony Xu evtchn_port_t port; 709*93d43e7eSAnthony Xu 710*93d43e7eSAnthony Xu port = xenevtchn_pending(state->xce_handle); 711*93d43e7eSAnthony Xu if (port == state->bufioreq_local_port) { 712*93d43e7eSAnthony Xu timer_mod(state->buffered_io_timer, 713*93d43e7eSAnthony Xu BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); 714*93d43e7eSAnthony Xu return NULL; 715*93d43e7eSAnthony Xu } 716*93d43e7eSAnthony Xu 717*93d43e7eSAnthony Xu if (port != -1) { 718*93d43e7eSAnthony Xu for (i = 0; i < max_cpus; i++) { 719*93d43e7eSAnthony Xu if (state->ioreq_local_port[i] == port) { 720*93d43e7eSAnthony Xu break; 721*93d43e7eSAnthony Xu } 722*93d43e7eSAnthony Xu } 723*93d43e7eSAnthony Xu 724*93d43e7eSAnthony Xu if (i == max_cpus) { 725*93d43e7eSAnthony Xu hw_error("Fatal error while trying to get io event!\n"); 726*93d43e7eSAnthony Xu } 727*93d43e7eSAnthony Xu 728*93d43e7eSAnthony Xu /* unmask the wanted port again */ 729*93d43e7eSAnthony Xu xenevtchn_unmask(state->xce_handle, port); 730*93d43e7eSAnthony Xu 731*93d43e7eSAnthony Xu /* get the io packet from shared memory */ 732*93d43e7eSAnthony Xu state->send_vcpu = i; 733*93d43e7eSAnthony Xu return cpu_get_ioreq_from_shared_memory(state, i); 734*93d43e7eSAnthony Xu } 735*93d43e7eSAnthony Xu 736*93d43e7eSAnthony Xu /* read error or read nothing */ 737*93d43e7eSAnthony Xu return NULL; 738*93d43e7eSAnthony Xu } 739*93d43e7eSAnthony Xu 740*93d43e7eSAnthony Xu static uint32_t do_inp(uint32_t addr, unsigned long size) 741*93d43e7eSAnthony Xu { 742*93d43e7eSAnthony Xu switch (size) { 743*93d43e7eSAnthony Xu case 1: 744*93d43e7eSAnthony Xu return cpu_inb(addr); 745*93d43e7eSAnthony Xu case 2: 746*93d43e7eSAnthony Xu return cpu_inw(addr); 747*93d43e7eSAnthony Xu case 4: 748*93d43e7eSAnthony Xu return cpu_inl(addr); 749*93d43e7eSAnthony Xu default: 750*93d43e7eSAnthony Xu hw_error("inp: bad size: %04x %lx", addr, size); 751*93d43e7eSAnthony Xu } 752*93d43e7eSAnthony Xu } 753*93d43e7eSAnthony Xu 754*93d43e7eSAnthony Xu static void do_outp(uint32_t addr, 755*93d43e7eSAnthony Xu unsigned long size, uint32_t val) 756*93d43e7eSAnthony Xu { 757*93d43e7eSAnthony Xu switch (size) { 758*93d43e7eSAnthony Xu case 1: 759*93d43e7eSAnthony Xu return cpu_outb(addr, val); 760*93d43e7eSAnthony Xu case 2: 761*93d43e7eSAnthony Xu return cpu_outw(addr, val); 762*93d43e7eSAnthony Xu case 4: 763*93d43e7eSAnthony Xu return cpu_outl(addr, val); 764*93d43e7eSAnthony Xu default: 765*93d43e7eSAnthony Xu hw_error("outp: bad size: %04x %lx", addr, size); 766*93d43e7eSAnthony Xu } 767*93d43e7eSAnthony Xu } 768*93d43e7eSAnthony Xu 769*93d43e7eSAnthony Xu /* 770*93d43e7eSAnthony Xu * Helper functions which read/write an object from/to physical guest 771*93d43e7eSAnthony Xu * memory, as part of the implementation of an ioreq. 772*93d43e7eSAnthony Xu * 773*93d43e7eSAnthony Xu * Equivalent to 774*93d43e7eSAnthony Xu * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i, 775*93d43e7eSAnthony Xu * val, req->size, 0/1) 776*93d43e7eSAnthony Xu * except without the integer overflow problems. 777*93d43e7eSAnthony Xu */ 778*93d43e7eSAnthony Xu static void rw_phys_req_item(hwaddr addr, 779*93d43e7eSAnthony Xu ioreq_t *req, uint32_t i, void *val, int rw) 780*93d43e7eSAnthony Xu { 781*93d43e7eSAnthony Xu /* Do everything unsigned so overflow just results in a truncated result 782*93d43e7eSAnthony Xu * and accesses to undesired parts of guest memory, which is up 783*93d43e7eSAnthony Xu * to the guest */ 784*93d43e7eSAnthony Xu hwaddr offset = (hwaddr)req->size * i; 785*93d43e7eSAnthony Xu if (req->df) { 786*93d43e7eSAnthony Xu addr -= offset; 787*93d43e7eSAnthony Xu } else { 788*93d43e7eSAnthony Xu addr += offset; 789*93d43e7eSAnthony Xu } 790*93d43e7eSAnthony Xu cpu_physical_memory_rw(addr, val, req->size, rw); 791*93d43e7eSAnthony Xu } 792*93d43e7eSAnthony Xu 793*93d43e7eSAnthony Xu static inline void read_phys_req_item(hwaddr addr, 794*93d43e7eSAnthony Xu ioreq_t *req, uint32_t i, void *val) 795*93d43e7eSAnthony Xu { 796*93d43e7eSAnthony Xu rw_phys_req_item(addr, req, i, val, 0); 797*93d43e7eSAnthony Xu } 798*93d43e7eSAnthony Xu static inline void write_phys_req_item(hwaddr addr, 799*93d43e7eSAnthony Xu ioreq_t *req, uint32_t i, void *val) 800*93d43e7eSAnthony Xu { 801*93d43e7eSAnthony Xu rw_phys_req_item(addr, req, i, val, 1); 802*93d43e7eSAnthony Xu } 803*93d43e7eSAnthony Xu 804*93d43e7eSAnthony Xu 805*93d43e7eSAnthony Xu static void cpu_ioreq_pio(ioreq_t *req) 806*93d43e7eSAnthony Xu { 807*93d43e7eSAnthony Xu uint32_t i; 808*93d43e7eSAnthony Xu 809*93d43e7eSAnthony Xu trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr, 810*93d43e7eSAnthony Xu req->data, req->count, req->size); 811*93d43e7eSAnthony Xu 812*93d43e7eSAnthony Xu if (req->size > sizeof(uint32_t)) { 813*93d43e7eSAnthony Xu hw_error("PIO: bad size (%u)", req->size); 814*93d43e7eSAnthony Xu } 815*93d43e7eSAnthony Xu 816*93d43e7eSAnthony Xu if (req->dir == IOREQ_READ) { 817*93d43e7eSAnthony Xu if (!req->data_is_ptr) { 818*93d43e7eSAnthony Xu req->data = do_inp(req->addr, req->size); 819*93d43e7eSAnthony Xu trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr, 820*93d43e7eSAnthony Xu req->size); 821*93d43e7eSAnthony Xu } else { 822*93d43e7eSAnthony Xu uint32_t tmp; 823*93d43e7eSAnthony Xu 824*93d43e7eSAnthony Xu for (i = 0; i < req->count; i++) { 825*93d43e7eSAnthony Xu tmp = do_inp(req->addr, req->size); 826*93d43e7eSAnthony Xu write_phys_req_item(req->data, req, i, &tmp); 827*93d43e7eSAnthony Xu } 828*93d43e7eSAnthony Xu } 829*93d43e7eSAnthony Xu } else if (req->dir == IOREQ_WRITE) { 830*93d43e7eSAnthony Xu if (!req->data_is_ptr) { 831*93d43e7eSAnthony Xu trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr, 832*93d43e7eSAnthony Xu req->size); 833*93d43e7eSAnthony Xu do_outp(req->addr, req->size, req->data); 834*93d43e7eSAnthony Xu } else { 835*93d43e7eSAnthony Xu for (i = 0; i < req->count; i++) { 836*93d43e7eSAnthony Xu uint32_t tmp = 0; 837*93d43e7eSAnthony Xu 838*93d43e7eSAnthony Xu read_phys_req_item(req->data, req, i, &tmp); 839*93d43e7eSAnthony Xu do_outp(req->addr, req->size, tmp); 840*93d43e7eSAnthony Xu } 841*93d43e7eSAnthony Xu } 842*93d43e7eSAnthony Xu } 843*93d43e7eSAnthony Xu } 844*93d43e7eSAnthony Xu 845*93d43e7eSAnthony Xu static void cpu_ioreq_move(ioreq_t *req) 846*93d43e7eSAnthony Xu { 847*93d43e7eSAnthony Xu uint32_t i; 848*93d43e7eSAnthony Xu 849*93d43e7eSAnthony Xu trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr, 850*93d43e7eSAnthony Xu req->data, req->count, req->size); 851*93d43e7eSAnthony Xu 852*93d43e7eSAnthony Xu if (req->size > sizeof(req->data)) { 853*93d43e7eSAnthony Xu hw_error("MMIO: bad size (%u)", req->size); 854*93d43e7eSAnthony Xu } 855*93d43e7eSAnthony Xu 856*93d43e7eSAnthony Xu if (!req->data_is_ptr) { 857*93d43e7eSAnthony Xu if (req->dir == IOREQ_READ) { 858*93d43e7eSAnthony Xu for (i = 0; i < req->count; i++) { 859*93d43e7eSAnthony Xu read_phys_req_item(req->addr, req, i, &req->data); 860*93d43e7eSAnthony Xu } 861*93d43e7eSAnthony Xu } else if (req->dir == IOREQ_WRITE) { 862*93d43e7eSAnthony Xu for (i = 0; i < req->count; i++) { 863*93d43e7eSAnthony Xu write_phys_req_item(req->addr, req, i, &req->data); 864*93d43e7eSAnthony Xu } 865*93d43e7eSAnthony Xu } 866*93d43e7eSAnthony Xu } else { 867*93d43e7eSAnthony Xu uint64_t tmp; 868*93d43e7eSAnthony Xu 869*93d43e7eSAnthony Xu if (req->dir == IOREQ_READ) { 870*93d43e7eSAnthony Xu for (i = 0; i < req->count; i++) { 871*93d43e7eSAnthony Xu read_phys_req_item(req->addr, req, i, &tmp); 872*93d43e7eSAnthony Xu write_phys_req_item(req->data, req, i, &tmp); 873*93d43e7eSAnthony Xu } 874*93d43e7eSAnthony Xu } else if (req->dir == IOREQ_WRITE) { 875*93d43e7eSAnthony Xu for (i = 0; i < req->count; i++) { 876*93d43e7eSAnthony Xu read_phys_req_item(req->data, req, i, &tmp); 877*93d43e7eSAnthony Xu write_phys_req_item(req->addr, req, i, &tmp); 878*93d43e7eSAnthony Xu } 879*93d43e7eSAnthony Xu } 880*93d43e7eSAnthony Xu } 881*93d43e7eSAnthony Xu } 882*93d43e7eSAnthony Xu 883*93d43e7eSAnthony Xu static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req) 884*93d43e7eSAnthony Xu { 885*93d43e7eSAnthony Xu X86CPU *cpu; 886*93d43e7eSAnthony Xu CPUX86State *env; 887*93d43e7eSAnthony Xu 888*93d43e7eSAnthony Xu cpu = X86_CPU(current_cpu); 889*93d43e7eSAnthony Xu env = &cpu->env; 890*93d43e7eSAnthony Xu env->regs[R_EAX] = req->data; 891*93d43e7eSAnthony Xu env->regs[R_EBX] = vmport_regs->ebx; 892*93d43e7eSAnthony Xu env->regs[R_ECX] = vmport_regs->ecx; 893*93d43e7eSAnthony Xu env->regs[R_EDX] = vmport_regs->edx; 894*93d43e7eSAnthony Xu env->regs[R_ESI] = vmport_regs->esi; 895*93d43e7eSAnthony Xu env->regs[R_EDI] = vmport_regs->edi; 896*93d43e7eSAnthony Xu } 897*93d43e7eSAnthony Xu 898*93d43e7eSAnthony Xu static void regs_from_cpu(vmware_regs_t *vmport_regs) 899*93d43e7eSAnthony Xu { 900*93d43e7eSAnthony Xu X86CPU *cpu = X86_CPU(current_cpu); 901*93d43e7eSAnthony Xu CPUX86State *env = &cpu->env; 902*93d43e7eSAnthony Xu 903*93d43e7eSAnthony Xu vmport_regs->ebx = env->regs[R_EBX]; 904*93d43e7eSAnthony Xu vmport_regs->ecx = env->regs[R_ECX]; 905*93d43e7eSAnthony Xu vmport_regs->edx = env->regs[R_EDX]; 906*93d43e7eSAnthony Xu vmport_regs->esi = env->regs[R_ESI]; 907*93d43e7eSAnthony Xu vmport_regs->edi = env->regs[R_EDI]; 908*93d43e7eSAnthony Xu } 909*93d43e7eSAnthony Xu 910*93d43e7eSAnthony Xu static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req) 911*93d43e7eSAnthony Xu { 912*93d43e7eSAnthony Xu vmware_regs_t *vmport_regs; 913*93d43e7eSAnthony Xu 914*93d43e7eSAnthony Xu assert(state->shared_vmport_page); 915*93d43e7eSAnthony Xu vmport_regs = 916*93d43e7eSAnthony Xu &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu]; 917*93d43e7eSAnthony Xu QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs)); 918*93d43e7eSAnthony Xu 919*93d43e7eSAnthony Xu current_cpu = state->cpu_by_vcpu_id[state->send_vcpu]; 920*93d43e7eSAnthony Xu regs_to_cpu(vmport_regs, req); 921*93d43e7eSAnthony Xu cpu_ioreq_pio(req); 922*93d43e7eSAnthony Xu regs_from_cpu(vmport_regs); 923*93d43e7eSAnthony Xu current_cpu = NULL; 924*93d43e7eSAnthony Xu } 925*93d43e7eSAnthony Xu 926*93d43e7eSAnthony Xu static void handle_ioreq(XenIOState *state, ioreq_t *req) 927*93d43e7eSAnthony Xu { 928*93d43e7eSAnthony Xu trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr, 929*93d43e7eSAnthony Xu req->addr, req->data, req->count, req->size); 930*93d43e7eSAnthony Xu 931*93d43e7eSAnthony Xu if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) && 932*93d43e7eSAnthony Xu (req->size < sizeof (target_ulong))) { 933*93d43e7eSAnthony Xu req->data &= ((target_ulong) 1 << (8 * req->size)) - 1; 934*93d43e7eSAnthony Xu } 935*93d43e7eSAnthony Xu 936*93d43e7eSAnthony Xu if (req->dir == IOREQ_WRITE) 937*93d43e7eSAnthony Xu trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr, 938*93d43e7eSAnthony Xu req->addr, req->data, req->count, req->size); 939*93d43e7eSAnthony Xu 940*93d43e7eSAnthony Xu switch (req->type) { 941*93d43e7eSAnthony Xu case IOREQ_TYPE_PIO: 942*93d43e7eSAnthony Xu cpu_ioreq_pio(req); 943*93d43e7eSAnthony Xu break; 944*93d43e7eSAnthony Xu case IOREQ_TYPE_COPY: 945*93d43e7eSAnthony Xu cpu_ioreq_move(req); 946*93d43e7eSAnthony Xu break; 947*93d43e7eSAnthony Xu case IOREQ_TYPE_VMWARE_PORT: 948*93d43e7eSAnthony Xu handle_vmport_ioreq(state, req); 949*93d43e7eSAnthony Xu break; 950*93d43e7eSAnthony Xu case IOREQ_TYPE_TIMEOFFSET: 951*93d43e7eSAnthony Xu break; 952*93d43e7eSAnthony Xu case IOREQ_TYPE_INVALIDATE: 953*93d43e7eSAnthony Xu xen_invalidate_map_cache(); 954*93d43e7eSAnthony Xu break; 955*93d43e7eSAnthony Xu case IOREQ_TYPE_PCI_CONFIG: { 956*93d43e7eSAnthony Xu uint32_t sbdf = req->addr >> 32; 957*93d43e7eSAnthony Xu uint32_t val; 958*93d43e7eSAnthony Xu 959*93d43e7eSAnthony Xu /* Fake a write to port 0xCF8 so that 960*93d43e7eSAnthony Xu * the config space access will target the 961*93d43e7eSAnthony Xu * correct device model. 962*93d43e7eSAnthony Xu */ 963*93d43e7eSAnthony Xu val = (1u << 31) | 964*93d43e7eSAnthony Xu ((req->addr & 0x0f00) << 16) | 965*93d43e7eSAnthony Xu ((sbdf & 0xffff) << 8) | 966*93d43e7eSAnthony Xu (req->addr & 0xfc); 967*93d43e7eSAnthony Xu do_outp(0xcf8, 4, val); 968*93d43e7eSAnthony Xu 969*93d43e7eSAnthony Xu /* Now issue the config space access via 970*93d43e7eSAnthony Xu * port 0xCFC 971*93d43e7eSAnthony Xu */ 972*93d43e7eSAnthony Xu req->addr = 0xcfc | (req->addr & 0x03); 973*93d43e7eSAnthony Xu cpu_ioreq_pio(req); 974*93d43e7eSAnthony Xu break; 975*93d43e7eSAnthony Xu } 976*93d43e7eSAnthony Xu default: 977*93d43e7eSAnthony Xu hw_error("Invalid ioreq type 0x%x\n", req->type); 978*93d43e7eSAnthony Xu } 979*93d43e7eSAnthony Xu if (req->dir == IOREQ_READ) { 980*93d43e7eSAnthony Xu trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr, 981*93d43e7eSAnthony Xu req->addr, req->data, req->count, req->size); 982*93d43e7eSAnthony Xu } 983*93d43e7eSAnthony Xu } 984*93d43e7eSAnthony Xu 985*93d43e7eSAnthony Xu static int handle_buffered_iopage(XenIOState *state) 986*93d43e7eSAnthony Xu { 987*93d43e7eSAnthony Xu buffered_iopage_t *buf_page = state->buffered_io_page; 988*93d43e7eSAnthony Xu buf_ioreq_t *buf_req = NULL; 989*93d43e7eSAnthony Xu ioreq_t req; 990*93d43e7eSAnthony Xu int qw; 991*93d43e7eSAnthony Xu 992*93d43e7eSAnthony Xu if (!buf_page) { 993*93d43e7eSAnthony Xu return 0; 994*93d43e7eSAnthony Xu } 995*93d43e7eSAnthony Xu 996*93d43e7eSAnthony Xu memset(&req, 0x00, sizeof(req)); 997*93d43e7eSAnthony Xu req.state = STATE_IOREQ_READY; 998*93d43e7eSAnthony Xu req.count = 1; 999*93d43e7eSAnthony Xu req.dir = IOREQ_WRITE; 1000*93d43e7eSAnthony Xu 1001*93d43e7eSAnthony Xu for (;;) { 1002*93d43e7eSAnthony Xu uint32_t rdptr = buf_page->read_pointer, wrptr; 1003*93d43e7eSAnthony Xu 1004*93d43e7eSAnthony Xu xen_rmb(); 1005*93d43e7eSAnthony Xu wrptr = buf_page->write_pointer; 1006*93d43e7eSAnthony Xu xen_rmb(); 1007*93d43e7eSAnthony Xu if (rdptr != buf_page->read_pointer) { 1008*93d43e7eSAnthony Xu continue; 1009*93d43e7eSAnthony Xu } 1010*93d43e7eSAnthony Xu if (rdptr == wrptr) { 1011*93d43e7eSAnthony Xu break; 1012*93d43e7eSAnthony Xu } 1013*93d43e7eSAnthony Xu buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM]; 1014*93d43e7eSAnthony Xu req.size = 1U << buf_req->size; 1015*93d43e7eSAnthony Xu req.addr = buf_req->addr; 1016*93d43e7eSAnthony Xu req.data = buf_req->data; 1017*93d43e7eSAnthony Xu req.type = buf_req->type; 1018*93d43e7eSAnthony Xu xen_rmb(); 1019*93d43e7eSAnthony Xu qw = (req.size == 8); 1020*93d43e7eSAnthony Xu if (qw) { 1021*93d43e7eSAnthony Xu if (rdptr + 1 == wrptr) { 1022*93d43e7eSAnthony Xu hw_error("Incomplete quad word buffered ioreq"); 1023*93d43e7eSAnthony Xu } 1024*93d43e7eSAnthony Xu buf_req = &buf_page->buf_ioreq[(rdptr + 1) % 1025*93d43e7eSAnthony Xu IOREQ_BUFFER_SLOT_NUM]; 1026*93d43e7eSAnthony Xu req.data |= ((uint64_t)buf_req->data) << 32; 1027*93d43e7eSAnthony Xu xen_rmb(); 1028*93d43e7eSAnthony Xu } 1029*93d43e7eSAnthony Xu 1030*93d43e7eSAnthony Xu handle_ioreq(state, &req); 1031*93d43e7eSAnthony Xu 1032*93d43e7eSAnthony Xu /* Only req.data may get updated by handle_ioreq(), albeit even that 1033*93d43e7eSAnthony Xu * should not happen as such data would never make it to the guest (we 1034*93d43e7eSAnthony Xu * can only usefully see writes here after all). 1035*93d43e7eSAnthony Xu */ 1036*93d43e7eSAnthony Xu assert(req.state == STATE_IOREQ_READY); 1037*93d43e7eSAnthony Xu assert(req.count == 1); 1038*93d43e7eSAnthony Xu assert(req.dir == IOREQ_WRITE); 1039*93d43e7eSAnthony Xu assert(!req.data_is_ptr); 1040*93d43e7eSAnthony Xu 1041*93d43e7eSAnthony Xu atomic_add(&buf_page->read_pointer, qw + 1); 1042*93d43e7eSAnthony Xu } 1043*93d43e7eSAnthony Xu 1044*93d43e7eSAnthony Xu return req.count; 1045*93d43e7eSAnthony Xu } 1046*93d43e7eSAnthony Xu 1047*93d43e7eSAnthony Xu static void handle_buffered_io(void *opaque) 1048*93d43e7eSAnthony Xu { 1049*93d43e7eSAnthony Xu XenIOState *state = opaque; 1050*93d43e7eSAnthony Xu 1051*93d43e7eSAnthony Xu if (handle_buffered_iopage(state)) { 1052*93d43e7eSAnthony Xu timer_mod(state->buffered_io_timer, 1053*93d43e7eSAnthony Xu BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); 1054*93d43e7eSAnthony Xu } else { 1055*93d43e7eSAnthony Xu timer_del(state->buffered_io_timer); 1056*93d43e7eSAnthony Xu xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port); 1057*93d43e7eSAnthony Xu } 1058*93d43e7eSAnthony Xu } 1059*93d43e7eSAnthony Xu 1060*93d43e7eSAnthony Xu static void cpu_handle_ioreq(void *opaque) 1061*93d43e7eSAnthony Xu { 1062*93d43e7eSAnthony Xu XenIOState *state = opaque; 1063*93d43e7eSAnthony Xu ioreq_t *req = cpu_get_ioreq(state); 1064*93d43e7eSAnthony Xu 1065*93d43e7eSAnthony Xu handle_buffered_iopage(state); 1066*93d43e7eSAnthony Xu if (req) { 1067*93d43e7eSAnthony Xu ioreq_t copy = *req; 1068*93d43e7eSAnthony Xu 1069*93d43e7eSAnthony Xu xen_rmb(); 1070*93d43e7eSAnthony Xu handle_ioreq(state, ©); 1071*93d43e7eSAnthony Xu req->data = copy.data; 1072*93d43e7eSAnthony Xu 1073*93d43e7eSAnthony Xu if (req->state != STATE_IOREQ_INPROCESS) { 1074*93d43e7eSAnthony Xu fprintf(stderr, "Badness in I/O request ... not in service?!: " 1075*93d43e7eSAnthony Xu "%x, ptr: %x, port: %"PRIx64", " 1076*93d43e7eSAnthony Xu "data: %"PRIx64", count: %u, size: %u, type: %u\n", 1077*93d43e7eSAnthony Xu req->state, req->data_is_ptr, req->addr, 1078*93d43e7eSAnthony Xu req->data, req->count, req->size, req->type); 1079*93d43e7eSAnthony Xu destroy_hvm_domain(false); 1080*93d43e7eSAnthony Xu return; 1081*93d43e7eSAnthony Xu } 1082*93d43e7eSAnthony Xu 1083*93d43e7eSAnthony Xu xen_wmb(); /* Update ioreq contents /then/ update state. */ 1084*93d43e7eSAnthony Xu 1085*93d43e7eSAnthony Xu /* 1086*93d43e7eSAnthony Xu * We do this before we send the response so that the tools 1087*93d43e7eSAnthony Xu * have the opportunity to pick up on the reset before the 1088*93d43e7eSAnthony Xu * guest resumes and does a hlt with interrupts disabled which 1089*93d43e7eSAnthony Xu * causes Xen to powerdown the domain. 1090*93d43e7eSAnthony Xu */ 1091*93d43e7eSAnthony Xu if (runstate_is_running()) { 1092*93d43e7eSAnthony Xu if (qemu_shutdown_requested_get()) { 1093*93d43e7eSAnthony Xu destroy_hvm_domain(false); 1094*93d43e7eSAnthony Xu } 1095*93d43e7eSAnthony Xu if (qemu_reset_requested_get()) { 1096*93d43e7eSAnthony Xu qemu_system_reset(VMRESET_REPORT); 1097*93d43e7eSAnthony Xu destroy_hvm_domain(true); 1098*93d43e7eSAnthony Xu } 1099*93d43e7eSAnthony Xu } 1100*93d43e7eSAnthony Xu 1101*93d43e7eSAnthony Xu req->state = STATE_IORESP_READY; 1102*93d43e7eSAnthony Xu xenevtchn_notify(state->xce_handle, 1103*93d43e7eSAnthony Xu state->ioreq_local_port[state->send_vcpu]); 1104*93d43e7eSAnthony Xu } 1105*93d43e7eSAnthony Xu } 1106*93d43e7eSAnthony Xu 1107*93d43e7eSAnthony Xu static void xen_main_loop_prepare(XenIOState *state) 1108*93d43e7eSAnthony Xu { 1109*93d43e7eSAnthony Xu int evtchn_fd = -1; 1110*93d43e7eSAnthony Xu 1111*93d43e7eSAnthony Xu if (state->xce_handle != NULL) { 1112*93d43e7eSAnthony Xu evtchn_fd = xenevtchn_fd(state->xce_handle); 1113*93d43e7eSAnthony Xu } 1114*93d43e7eSAnthony Xu 1115*93d43e7eSAnthony Xu state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io, 1116*93d43e7eSAnthony Xu state); 1117*93d43e7eSAnthony Xu 1118*93d43e7eSAnthony Xu if (evtchn_fd != -1) { 1119*93d43e7eSAnthony Xu CPUState *cpu_state; 1120*93d43e7eSAnthony Xu 1121*93d43e7eSAnthony Xu DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__); 1122*93d43e7eSAnthony Xu CPU_FOREACH(cpu_state) { 1123*93d43e7eSAnthony Xu DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n", 1124*93d43e7eSAnthony Xu __func__, cpu_state->cpu_index, cpu_state); 1125*93d43e7eSAnthony Xu state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state; 1126*93d43e7eSAnthony Xu } 1127*93d43e7eSAnthony Xu qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); 1128*93d43e7eSAnthony Xu } 1129*93d43e7eSAnthony Xu } 1130*93d43e7eSAnthony Xu 1131*93d43e7eSAnthony Xu 1132*93d43e7eSAnthony Xu static void xen_hvm_change_state_handler(void *opaque, int running, 1133*93d43e7eSAnthony Xu RunState rstate) 1134*93d43e7eSAnthony Xu { 1135*93d43e7eSAnthony Xu XenIOState *state = opaque; 1136*93d43e7eSAnthony Xu 1137*93d43e7eSAnthony Xu if (running) { 1138*93d43e7eSAnthony Xu xen_main_loop_prepare(state); 1139*93d43e7eSAnthony Xu } 1140*93d43e7eSAnthony Xu 1141*93d43e7eSAnthony Xu xen_set_ioreq_server_state(xen_domid, 1142*93d43e7eSAnthony Xu state->ioservid, 1143*93d43e7eSAnthony Xu (rstate == RUN_STATE_RUNNING)); 1144*93d43e7eSAnthony Xu } 1145*93d43e7eSAnthony Xu 1146*93d43e7eSAnthony Xu static void xen_exit_notifier(Notifier *n, void *data) 1147*93d43e7eSAnthony Xu { 1148*93d43e7eSAnthony Xu XenIOState *state = container_of(n, XenIOState, exit); 1149*93d43e7eSAnthony Xu 1150*93d43e7eSAnthony Xu xenevtchn_close(state->xce_handle); 1151*93d43e7eSAnthony Xu xs_daemon_close(state->xenstore); 1152*93d43e7eSAnthony Xu } 1153*93d43e7eSAnthony Xu 1154*93d43e7eSAnthony Xu static void xen_read_physmap(XenIOState *state) 1155*93d43e7eSAnthony Xu { 1156*93d43e7eSAnthony Xu XenPhysmap *physmap = NULL; 1157*93d43e7eSAnthony Xu unsigned int len, num, i; 1158*93d43e7eSAnthony Xu char path[80], *value = NULL; 1159*93d43e7eSAnthony Xu char **entries = NULL; 1160*93d43e7eSAnthony Xu 1161*93d43e7eSAnthony Xu snprintf(path, sizeof(path), 1162*93d43e7eSAnthony Xu "/local/domain/0/device-model/%d/physmap", xen_domid); 1163*93d43e7eSAnthony Xu entries = xs_directory(state->xenstore, 0, path, &num); 1164*93d43e7eSAnthony Xu if (entries == NULL) 1165*93d43e7eSAnthony Xu return; 1166*93d43e7eSAnthony Xu 1167*93d43e7eSAnthony Xu for (i = 0; i < num; i++) { 1168*93d43e7eSAnthony Xu physmap = g_malloc(sizeof (XenPhysmap)); 1169*93d43e7eSAnthony Xu physmap->phys_offset = strtoull(entries[i], NULL, 16); 1170*93d43e7eSAnthony Xu snprintf(path, sizeof(path), 1171*93d43e7eSAnthony Xu "/local/domain/0/device-model/%d/physmap/%s/start_addr", 1172*93d43e7eSAnthony Xu xen_domid, entries[i]); 1173*93d43e7eSAnthony Xu value = xs_read(state->xenstore, 0, path, &len); 1174*93d43e7eSAnthony Xu if (value == NULL) { 1175*93d43e7eSAnthony Xu g_free(physmap); 1176*93d43e7eSAnthony Xu continue; 1177*93d43e7eSAnthony Xu } 1178*93d43e7eSAnthony Xu physmap->start_addr = strtoull(value, NULL, 16); 1179*93d43e7eSAnthony Xu free(value); 1180*93d43e7eSAnthony Xu 1181*93d43e7eSAnthony Xu snprintf(path, sizeof(path), 1182*93d43e7eSAnthony Xu "/local/domain/0/device-model/%d/physmap/%s/size", 1183*93d43e7eSAnthony Xu xen_domid, entries[i]); 1184*93d43e7eSAnthony Xu value = xs_read(state->xenstore, 0, path, &len); 1185*93d43e7eSAnthony Xu if (value == NULL) { 1186*93d43e7eSAnthony Xu g_free(physmap); 1187*93d43e7eSAnthony Xu continue; 1188*93d43e7eSAnthony Xu } 1189*93d43e7eSAnthony Xu physmap->size = strtoull(value, NULL, 16); 1190*93d43e7eSAnthony Xu free(value); 1191*93d43e7eSAnthony Xu 1192*93d43e7eSAnthony Xu snprintf(path, sizeof(path), 1193*93d43e7eSAnthony Xu "/local/domain/0/device-model/%d/physmap/%s/name", 1194*93d43e7eSAnthony Xu xen_domid, entries[i]); 1195*93d43e7eSAnthony Xu physmap->name = xs_read(state->xenstore, 0, path, &len); 1196*93d43e7eSAnthony Xu 1197*93d43e7eSAnthony Xu QLIST_INSERT_HEAD(&state->physmap, physmap, list); 1198*93d43e7eSAnthony Xu } 1199*93d43e7eSAnthony Xu free(entries); 1200*93d43e7eSAnthony Xu } 1201*93d43e7eSAnthony Xu 1202*93d43e7eSAnthony Xu static void xen_wakeup_notifier(Notifier *notifier, void *data) 1203*93d43e7eSAnthony Xu { 1204*93d43e7eSAnthony Xu xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0); 1205*93d43e7eSAnthony Xu } 1206*93d43e7eSAnthony Xu 1207*93d43e7eSAnthony Xu void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) 1208*93d43e7eSAnthony Xu { 1209*93d43e7eSAnthony Xu int i, rc; 1210*93d43e7eSAnthony Xu xen_pfn_t ioreq_pfn; 1211*93d43e7eSAnthony Xu xen_pfn_t bufioreq_pfn; 1212*93d43e7eSAnthony Xu evtchn_port_t bufioreq_evtchn; 1213*93d43e7eSAnthony Xu XenIOState *state; 1214*93d43e7eSAnthony Xu 1215*93d43e7eSAnthony Xu state = g_malloc0(sizeof (XenIOState)); 1216*93d43e7eSAnthony Xu 1217*93d43e7eSAnthony Xu state->xce_handle = xenevtchn_open(NULL, 0); 1218*93d43e7eSAnthony Xu if (state->xce_handle == NULL) { 1219*93d43e7eSAnthony Xu perror("xen: event channel open"); 1220*93d43e7eSAnthony Xu goto err; 1221*93d43e7eSAnthony Xu } 1222*93d43e7eSAnthony Xu 1223*93d43e7eSAnthony Xu state->xenstore = xs_daemon_open(); 1224*93d43e7eSAnthony Xu if (state->xenstore == NULL) { 1225*93d43e7eSAnthony Xu perror("xen: xenstore open"); 1226*93d43e7eSAnthony Xu goto err; 1227*93d43e7eSAnthony Xu } 1228*93d43e7eSAnthony Xu 1229*93d43e7eSAnthony Xu if (xen_domid_restrict) { 1230*93d43e7eSAnthony Xu rc = xen_restrict(xen_domid); 1231*93d43e7eSAnthony Xu if (rc < 0) { 1232*93d43e7eSAnthony Xu error_report("failed to restrict: error %d", errno); 1233*93d43e7eSAnthony Xu goto err; 1234*93d43e7eSAnthony Xu } 1235*93d43e7eSAnthony Xu } 1236*93d43e7eSAnthony Xu 1237*93d43e7eSAnthony Xu xen_create_ioreq_server(xen_domid, &state->ioservid); 1238*93d43e7eSAnthony Xu 1239*93d43e7eSAnthony Xu state->exit.notify = xen_exit_notifier; 1240*93d43e7eSAnthony Xu qemu_add_exit_notifier(&state->exit); 1241*93d43e7eSAnthony Xu 1242*93d43e7eSAnthony Xu state->suspend.notify = xen_suspend_notifier; 1243*93d43e7eSAnthony Xu qemu_register_suspend_notifier(&state->suspend); 1244*93d43e7eSAnthony Xu 1245*93d43e7eSAnthony Xu state->wakeup.notify = xen_wakeup_notifier; 1246*93d43e7eSAnthony Xu qemu_register_wakeup_notifier(&state->wakeup); 1247*93d43e7eSAnthony Xu 1248*93d43e7eSAnthony Xu rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, 1249*93d43e7eSAnthony Xu &ioreq_pfn, &bufioreq_pfn, 1250*93d43e7eSAnthony Xu &bufioreq_evtchn); 1251*93d43e7eSAnthony Xu if (rc < 0) { 1252*93d43e7eSAnthony Xu error_report("failed to get ioreq server info: error %d handle=%p", 1253*93d43e7eSAnthony Xu errno, xen_xc); 1254*93d43e7eSAnthony Xu goto err; 1255*93d43e7eSAnthony Xu } 1256*93d43e7eSAnthony Xu 1257*93d43e7eSAnthony Xu DPRINTF("shared page at pfn %lx\n", ioreq_pfn); 1258*93d43e7eSAnthony Xu DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn); 1259*93d43e7eSAnthony Xu DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn); 1260*93d43e7eSAnthony Xu 1261*93d43e7eSAnthony Xu state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, 1262*93d43e7eSAnthony Xu PROT_READ|PROT_WRITE, 1263*93d43e7eSAnthony Xu 1, &ioreq_pfn, NULL); 1264*93d43e7eSAnthony Xu if (state->shared_page == NULL) { 1265*93d43e7eSAnthony Xu error_report("map shared IO page returned error %d handle=%p", 1266*93d43e7eSAnthony Xu errno, xen_xc); 1267*93d43e7eSAnthony Xu goto err; 1268*93d43e7eSAnthony Xu } 1269*93d43e7eSAnthony Xu 1270*93d43e7eSAnthony Xu rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn); 1271*93d43e7eSAnthony Xu if (!rc) { 1272*93d43e7eSAnthony Xu DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn); 1273*93d43e7eSAnthony Xu state->shared_vmport_page = 1274*93d43e7eSAnthony Xu xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE, 1275*93d43e7eSAnthony Xu 1, &ioreq_pfn, NULL); 1276*93d43e7eSAnthony Xu if (state->shared_vmport_page == NULL) { 1277*93d43e7eSAnthony Xu error_report("map shared vmport IO page returned error %d handle=%p", 1278*93d43e7eSAnthony Xu errno, xen_xc); 1279*93d43e7eSAnthony Xu goto err; 1280*93d43e7eSAnthony Xu } 1281*93d43e7eSAnthony Xu } else if (rc != -ENOSYS) { 1282*93d43e7eSAnthony Xu error_report("get vmport regs pfn returned error %d, rc=%d", 1283*93d43e7eSAnthony Xu errno, rc); 1284*93d43e7eSAnthony Xu goto err; 1285*93d43e7eSAnthony Xu } 1286*93d43e7eSAnthony Xu 1287*93d43e7eSAnthony Xu state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, 1288*93d43e7eSAnthony Xu PROT_READ|PROT_WRITE, 1289*93d43e7eSAnthony Xu 1, &bufioreq_pfn, NULL); 1290*93d43e7eSAnthony Xu if (state->buffered_io_page == NULL) { 1291*93d43e7eSAnthony Xu error_report("map buffered IO page returned error %d", errno); 1292*93d43e7eSAnthony Xu goto err; 1293*93d43e7eSAnthony Xu } 1294*93d43e7eSAnthony Xu 1295*93d43e7eSAnthony Xu /* Note: cpus is empty at this point in init */ 1296*93d43e7eSAnthony Xu state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *)); 1297*93d43e7eSAnthony Xu 1298*93d43e7eSAnthony Xu rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true); 1299*93d43e7eSAnthony Xu if (rc < 0) { 1300*93d43e7eSAnthony Xu error_report("failed to enable ioreq server info: error %d handle=%p", 1301*93d43e7eSAnthony Xu errno, xen_xc); 1302*93d43e7eSAnthony Xu goto err; 1303*93d43e7eSAnthony Xu } 1304*93d43e7eSAnthony Xu 1305*93d43e7eSAnthony Xu state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t)); 1306*93d43e7eSAnthony Xu 1307*93d43e7eSAnthony Xu /* FIXME: how about if we overflow the page here? */ 1308*93d43e7eSAnthony Xu for (i = 0; i < max_cpus; i++) { 1309*93d43e7eSAnthony Xu rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, 1310*93d43e7eSAnthony Xu xen_vcpu_eport(state->shared_page, i)); 1311*93d43e7eSAnthony Xu if (rc == -1) { 1312*93d43e7eSAnthony Xu error_report("shared evtchn %d bind error %d", i, errno); 1313*93d43e7eSAnthony Xu goto err; 1314*93d43e7eSAnthony Xu } 1315*93d43e7eSAnthony Xu state->ioreq_local_port[i] = rc; 1316*93d43e7eSAnthony Xu } 1317*93d43e7eSAnthony Xu 1318*93d43e7eSAnthony Xu rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, 1319*93d43e7eSAnthony Xu bufioreq_evtchn); 1320*93d43e7eSAnthony Xu if (rc == -1) { 1321*93d43e7eSAnthony Xu error_report("buffered evtchn bind error %d", errno); 1322*93d43e7eSAnthony Xu goto err; 1323*93d43e7eSAnthony Xu } 1324*93d43e7eSAnthony Xu state->bufioreq_local_port = rc; 1325*93d43e7eSAnthony Xu 1326*93d43e7eSAnthony Xu /* Init RAM management */ 1327*93d43e7eSAnthony Xu xen_map_cache_init(xen_phys_offset_to_gaddr, state); 1328*93d43e7eSAnthony Xu xen_ram_init(pcms, ram_size, ram_memory); 1329*93d43e7eSAnthony Xu 1330*93d43e7eSAnthony Xu qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); 1331*93d43e7eSAnthony Xu 1332*93d43e7eSAnthony Xu state->memory_listener = xen_memory_listener; 1333*93d43e7eSAnthony Xu QLIST_INIT(&state->physmap); 1334*93d43e7eSAnthony Xu memory_listener_register(&state->memory_listener, &address_space_memory); 1335*93d43e7eSAnthony Xu state->log_for_dirtybit = NULL; 1336*93d43e7eSAnthony Xu 1337*93d43e7eSAnthony Xu state->io_listener = xen_io_listener; 1338*93d43e7eSAnthony Xu memory_listener_register(&state->io_listener, &address_space_io); 1339*93d43e7eSAnthony Xu 1340*93d43e7eSAnthony Xu state->device_listener = xen_device_listener; 1341*93d43e7eSAnthony Xu device_listener_register(&state->device_listener); 1342*93d43e7eSAnthony Xu 1343*93d43e7eSAnthony Xu /* Initialize backend core & drivers */ 1344*93d43e7eSAnthony Xu if (xen_be_init() != 0) { 1345*93d43e7eSAnthony Xu error_report("xen backend core setup failed"); 1346*93d43e7eSAnthony Xu goto err; 1347*93d43e7eSAnthony Xu } 1348*93d43e7eSAnthony Xu xen_be_register_common(); 1349*93d43e7eSAnthony Xu xen_read_physmap(state); 1350*93d43e7eSAnthony Xu 1351*93d43e7eSAnthony Xu /* Disable ACPI build because Xen handles it */ 1352*93d43e7eSAnthony Xu pcms->acpi_build_enabled = false; 1353*93d43e7eSAnthony Xu 1354*93d43e7eSAnthony Xu return; 1355*93d43e7eSAnthony Xu 1356*93d43e7eSAnthony Xu err: 1357*93d43e7eSAnthony Xu error_report("xen hardware virtual machine initialisation failed"); 1358*93d43e7eSAnthony Xu exit(1); 1359*93d43e7eSAnthony Xu } 1360*93d43e7eSAnthony Xu 1361*93d43e7eSAnthony Xu void destroy_hvm_domain(bool reboot) 1362*93d43e7eSAnthony Xu { 1363*93d43e7eSAnthony Xu xc_interface *xc_handle; 1364*93d43e7eSAnthony Xu int sts; 1365*93d43e7eSAnthony Xu 1366*93d43e7eSAnthony Xu xc_handle = xc_interface_open(0, 0, 0); 1367*93d43e7eSAnthony Xu if (xc_handle == NULL) { 1368*93d43e7eSAnthony Xu fprintf(stderr, "Cannot acquire xenctrl handle\n"); 1369*93d43e7eSAnthony Xu } else { 1370*93d43e7eSAnthony Xu sts = xc_domain_shutdown(xc_handle, xen_domid, 1371*93d43e7eSAnthony Xu reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff); 1372*93d43e7eSAnthony Xu if (sts != 0) { 1373*93d43e7eSAnthony Xu fprintf(stderr, "xc_domain_shutdown failed to issue %s, " 1374*93d43e7eSAnthony Xu "sts %d, %s\n", reboot ? "reboot" : "poweroff", 1375*93d43e7eSAnthony Xu sts, strerror(errno)); 1376*93d43e7eSAnthony Xu } else { 1377*93d43e7eSAnthony Xu fprintf(stderr, "Issued domain %d %s\n", xen_domid, 1378*93d43e7eSAnthony Xu reboot ? "reboot" : "poweroff"); 1379*93d43e7eSAnthony Xu } 1380*93d43e7eSAnthony Xu xc_interface_close(xc_handle); 1381*93d43e7eSAnthony Xu } 1382*93d43e7eSAnthony Xu } 1383*93d43e7eSAnthony Xu 1384*93d43e7eSAnthony Xu void xen_register_framebuffer(MemoryRegion *mr) 1385*93d43e7eSAnthony Xu { 1386*93d43e7eSAnthony Xu framebuffer = mr; 1387*93d43e7eSAnthony Xu } 1388*93d43e7eSAnthony Xu 1389*93d43e7eSAnthony Xu void xen_shutdown_fatal_error(const char *fmt, ...) 1390*93d43e7eSAnthony Xu { 1391*93d43e7eSAnthony Xu va_list ap; 1392*93d43e7eSAnthony Xu 1393*93d43e7eSAnthony Xu va_start(ap, fmt); 1394*93d43e7eSAnthony Xu vfprintf(stderr, fmt, ap); 1395*93d43e7eSAnthony Xu va_end(ap); 1396*93d43e7eSAnthony Xu fprintf(stderr, "Will destroy the domain.\n"); 1397*93d43e7eSAnthony Xu /* destroy the domain */ 1398*93d43e7eSAnthony Xu qemu_system_shutdown_request(); 1399*93d43e7eSAnthony Xu } 1400*93d43e7eSAnthony Xu 1401*93d43e7eSAnthony Xu void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) 1402*93d43e7eSAnthony Xu { 1403*93d43e7eSAnthony Xu if (unlikely(xen_in_migration)) { 1404*93d43e7eSAnthony Xu int rc; 1405*93d43e7eSAnthony Xu ram_addr_t start_pfn, nb_pages; 1406*93d43e7eSAnthony Xu 1407*93d43e7eSAnthony Xu if (length == 0) { 1408*93d43e7eSAnthony Xu length = TARGET_PAGE_SIZE; 1409*93d43e7eSAnthony Xu } 1410*93d43e7eSAnthony Xu start_pfn = start >> TARGET_PAGE_BITS; 1411*93d43e7eSAnthony Xu nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS) 1412*93d43e7eSAnthony Xu - start_pfn; 1413*93d43e7eSAnthony Xu rc = xen_modified_memory(xen_domid, start_pfn, nb_pages); 1414*93d43e7eSAnthony Xu if (rc) { 1415*93d43e7eSAnthony Xu fprintf(stderr, 1416*93d43e7eSAnthony Xu "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n", 1417*93d43e7eSAnthony Xu __func__, start, nb_pages, rc, strerror(-rc)); 1418*93d43e7eSAnthony Xu } 1419*93d43e7eSAnthony Xu } 1420*93d43e7eSAnthony Xu } 1421*93d43e7eSAnthony Xu 1422*93d43e7eSAnthony Xu void qmp_xen_set_global_dirty_log(bool enable, Error **errp) 1423*93d43e7eSAnthony Xu { 1424*93d43e7eSAnthony Xu if (enable) { 1425*93d43e7eSAnthony Xu memory_global_dirty_log_start(); 1426*93d43e7eSAnthony Xu } else { 1427*93d43e7eSAnthony Xu memory_global_dirty_log_stop(); 1428*93d43e7eSAnthony Xu } 1429*93d43e7eSAnthony Xu } 1430