/*
 * Copyright (C) 2010       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "trace.h"

#include "hw/i386/pc.h"
#include "hw/irq.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen-x86.h"
#include "qemu/range.h"

#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/arch_hvm.h"
#include <xen/hvm/e820.h>
#include "exec/target_page.h"

static MemoryRegion ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older versions of Xen */

/*
 * This allows QEMU to build on a system that has Xen 4.5 or earlier installed.
 * This is here (not in hw/xen/xen_native.h) because xen/hvm/ioreq.h needs to
 * be included before this block and hw/xen/xen_native.h needs to be included
 * before xen/hvm/ioreq.h.
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT  3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

static shared_vmport_iopage_t *shared_vmport_page;

static QLIST_HEAD(, XenPhysmap) xen_physmap;
static const XenPhysmap *log_for_dirtybit;
/* Buffer used by xen_sync_dirty_bitmap */
static unsigned long *dirty_bitmap;
static Notifier suspend;
static Notifier wakeup;

/* Xen-specific functions for PIIX PCI */

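/*
 * Encode a PIIX INTx pin as a single PIRQ-style number: the device's
 * slot goes in the upper bits and the INTx line in the low two bits,
 * which xen_intx_set_irq() unpacks again with ">> 2" and "& 3".
 */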
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + (PCI_SLOT(pci_dev->devfn) << 2);
}

void xen_intx_set_irq(void *opaque, int irq_num, int level)
{
    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
                           irq_num & 3, level);
}

int xen_set_pci_link_route(uint8_t link, uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, xen_domid, link, irq);
}

int xen_is_pirq_msi(uint32_t msi_data)
{
    /*
     * If the vector is 0, the MSI is remapped into a PIRQ, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_inject_msi(xen_domid, addr, data);
}

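/*
 * On QEMU suspend, report ACPI sleep state S3 (suspend-to-RAM) to Xen;
 * xen_wakeup_notifier() below moves the domain back to S0 on wakeup.
 */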
static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xen_set_isa_irq_level(xen_domid, irq, level);
}

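/*
 * Allocate the 16 qemu_irq lines of the emulated ISA interrupt
 * controller; each line simply forwards its level to Xen via
 * xen_set_isa_irq_level().
 */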
qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

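/*
 * Carve the guest RAM allocated by Xen into the regions QEMU expects:
 * a 640k alias at address 0, a low-memory alias from 0xc0000 up to the
 * below-4G boundary and, when RAM exceeds that boundary, a high alias
 * starting at 4 GiB.
 */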
static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    X86MachineState *x86ms = X86_MACHINE(pcms);
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem =
        object_property_get_uint(qdev_get_machine(),
                                 PC_MACHINE_MAX_RAM_BELOW_4G,
                                 &error_abort);

    /*
     * Handle the machine option max-ram-below-4g: the effective limit
     * is min(Xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        x86ms->above_4g_mem_size = ram_size - user_lowmem;
        x86ms->below_4g_mem_size = user_lowmem;
    } else {
        x86ms->above_4g_mem_size = 0;
        x86ms->below_4g_mem_size = ram_size;
    }
    if (!x86ms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory contiguously: it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (4 * GiB) + x86ms->above_4g_mem_size;
    }
    memory_region_init_ram(&xen_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &xen_memory;

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &xen_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /*
     * Skip the VGA I/O memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to
     * load option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &xen_memory, 0xc0000,
                             x86ms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (x86ms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &xen_memory, 0x100000000ULL,
                                 x86ms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}

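/*
 * Look up the physmap entry, if any, whose guest-physical range covers
 * start_addr (masked down to a page boundary).
 */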
static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size,
                                   int page_mask)
{
    XenPhysmap *physmap = NULL;

    start_addr &= page_mask;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

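/*
 * Translate a RAMBlock offset back to the guest physical address it is
 * mapped at; if no physmap entry covers it, return the offset unchanged.
 */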
static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size,
                                       int page_mask)
{
    hwaddr addr = phys_offset & page_mask;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr + (phys_offset - physmap->phys_offset);
        }
    }

    return phys_offset;
}

#ifdef XEN_COMPAT_PHYSMAP
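/*
 * Persist a physmap entry under
 * /local/domain/0/device-model/<domid>/physmap/<phys_offset>/ in
 * xenstore, so that xen_read_physmap() can restore it after migration.
 */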
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    char path[80], value[17];

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (physmap->name) {
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                xen_domid, (uint64_t)physmap->phys_offset);
        if (!xs_write(state->xenstore, 0, path,
                      physmap->name, strlen(physmap->name))) {
            return -1;
        }
    }
    return 0;
}
#else
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    return 0;
}
#endif

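/*
 * Relocate the RAM backing a MemoryRegion (in practice only the linear
 * framebuffer qualifies) so that it appears at start_addr in the guest
 * physical address space, then record and persist the mapping.
 */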
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    unsigned long nr_pages;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    const char *mr_name;

    if (get_physmapping(start_addr, size, page_mask)) {
        return 0;
    }
    if (size == 0) {
        return -1;
    }

    /*
     * Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram, and avoid tracking
     * the legacy VGA region.
     */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    mr_name = memory_region_name(mr);

    physmap = g_new(XenPhysmap, 1);

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /*
         * Now that we have a physmap entry we can replace the dummy
         * mapping with a real one of guest foreign memory.
         */
        uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
        assert(p && p == memory_region_get_ram_ptr(mr));

        return 0;
    }

    pfn = phys_offset >> target_page_bits;
    start_gpfn = start_addr >> target_page_bits;
    nr_pages = size >> target_page_bits;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
                                        start_gpfn);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     nr_pages, pfn, start_gpfn, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
                                   start_addr >> target_page_bits,
                                   (start_addr + size - 1) >> target_page_bits,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
    if (rc) {
        error_report("pin_memory_cacheattr failed: %s", strerror(errno));
    }
    return xen_save_physmap(state, physmap);
}

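/*
 * Undo xen_add_to_physmap(): move the pages back to their original
 * GFNs and drop the physmap entry, clearing any dirty-log state that
 * was tied to it.
 */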
static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(start_addr, size, page_mask);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= target_page_bits;
    start_addr >>= target_page_bits;
    phys_offset >>= target_page_bits;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
                                        phys_offset);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory "RAM_ADDR_FMT" pages"
                     " from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     size, start_addr, phys_offset, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    QLIST_REMOVE(physmap, list);
    if (log_for_dirtybit == physmap) {
        log_for_dirtybit = NULL;
        g_free(dirty_bitmap);
        dirty_bitmap = NULL;
    }
    g_free(physmap);

    return 0;
}

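/*
 * Fetch the dirty-VRAM bitmap for the tracked region from Xen and
 * propagate every dirty page into QEMU's framebuffer MemoryRegion.
 * Xen tracks at most one such region per domain.
 */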
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    hwaddr npages = size >> target_page_bits;
    const int width = sizeof(unsigned long) * 8;
    size_t bitmap_size = DIV_ROUND_UP(npages, width);
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(start_addr, size, page_mask);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (log_for_dirtybit == NULL) {
        log_for_dirtybit = physmap;
        dirty_bitmap = g_new(unsigned long, bitmap_size);
    } else if (log_for_dirtybit != physmap) {
        /* Only one range can be tracked for the dirty bitmap. */
        return;
    }

    rc = xen_track_dirty_vram(xen_domid, start_addr >> target_page_bits,
                              npages, dirty_bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" HWADDR_FMT_plx
                    ", 0x" HWADDR_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < bitmap_size; i++) {
        unsigned long map = dirty_bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * page_size, page_size);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        log_for_dirtybit = NULL;
        g_free(dirty_bitmap);
        dirty_bitmap = NULL;
        /* Disable dirty bit tracking */
        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static const MemoryListener xen_memory_listener = {
    .name = "xen-memory",
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
};

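/*
 * VMware backdoor (vmport) handling: Xen supplies the register state of
 * the vCPU that hit the port in the shared vmport page. Copy it into
 * the CPU state, run the ordinary port I/O path, then copy the results
 * back for Xen to return to the guest.
 */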
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(shared_vmport_page);
    vmport_regs =
        &shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}

#ifdef XEN_COMPAT_PHYSMAP
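/*
 * On incoming migration, rebuild the physmap list from the entries that
 * xen_save_physmap() stored in xenstore.
 */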
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_new(XenPhysmap, 1);
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/size",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/name",
                xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
    }
    free(entries);
}
#else
static void xen_read_physmap(XenIOState *state)
{
}
#endif

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    MachineState *ms = MACHINE(pcms);
    unsigned int max_cpus = ms->smp.max_cpus;
    int rc;
    xen_pfn_t ioreq_pfn;
    XenIOState *state;

    state = g_new0(XenIOState, 1);

    xen_register_ioreq(state, max_cpus, &xen_memory_listener);

    QLIST_INIT(&xen_physmap);
    xen_read_physmap(state);

    suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&suspend);

    wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&wakeup);

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    xen_ram_init(pcms, ms->ram_size, ram_memory);

    /* Disable ACPI build because Xen handles it */
    pcms->acpi_build_enabled = false;

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

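/*
 * Called when QEMU itself writes to guest memory; during migration the
 * affected pages are reported to Xen so its dirty log stays accurate.
 */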
void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;

    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        start = xen_phys_offset_to_gaddr(start, length, page_mask);

        if (length == 0) {
            length = page_size;
        }
        start_pfn = start >> target_page_bits;
        nb_pages = ((start + length + page_size - 1) >> target_page_bits)
            - start_pfn;
        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, errno, strerror(errno));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
    } else {
        memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
    }
}

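/*
 * Add or remove a RAM section from the guest physical address map:
 * plain RAM is relocated into the physmap, ROM is marked read-only via
 * xen_set_mem_type(), and removal tears the physmap entry down again.
 */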
void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section,
                         bool add)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= page_mask;
    size = ROUND_UP(size, page_size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xen_set_mem_type(xen_domid, mem_type,
                                 start_addr >> target_page_bits,
                                 size >> target_page_bits)) {
                DPRINTF("xen_set_mem_type error, addr: "HWADDR_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "HWADDR_FMT_plx"\n",
                    start_addr);
        }
    }
}

void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
{
    switch (req->type) {
    case IOREQ_TYPE_VMWARE_PORT:
        handle_vmport_ioreq(state, req);
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}
733