1 /* 2 * Physical memory management API 3 * 4 * Copyright 2011 Red Hat, Inc. and/or its affiliates 5 * 6 * Authors: 7 * Avi Kivity <avi@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 */ 13 14 #ifndef MEMORY_H 15 #define MEMORY_H 16 17 #ifndef CONFIG_USER_ONLY 18 19 #define DIRTY_MEMORY_VGA 0 20 #define DIRTY_MEMORY_CODE 1 21 #define DIRTY_MEMORY_MIGRATION 2 22 #define DIRTY_MEMORY_NUM 3 /* num of dirty bits */ 23 24 #include <stdint.h> 25 #include <stdbool.h> 26 #include "qemu-common.h" 27 #include "exec/cpu-common.h" 28 #ifndef CONFIG_USER_ONLY 29 #include "exec/hwaddr.h" 30 #endif 31 #include "qemu/queue.h" 32 #include "qemu/int128.h" 33 #include "qemu/notify.h" 34 35 #define MAX_PHYS_ADDR_SPACE_BITS 62 36 #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1) 37 38 typedef struct MemoryRegionOps MemoryRegionOps; 39 typedef struct MemoryRegionMmio MemoryRegionMmio; 40 41 struct MemoryRegionMmio { 42 CPUReadMemoryFunc *read[3]; 43 CPUWriteMemoryFunc *write[3]; 44 }; 45 46 typedef struct IOMMUTLBEntry IOMMUTLBEntry; 47 48 /* See address_space_translate: bit 0 is read, bit 1 is write. */ 49 typedef enum { 50 IOMMU_NONE = 0, 51 IOMMU_RO = 1, 52 IOMMU_WO = 2, 53 IOMMU_RW = 3, 54 } IOMMUAccessFlags; 55 56 struct IOMMUTLBEntry { 57 AddressSpace *target_as; 58 hwaddr iova; 59 hwaddr translated_addr; 60 hwaddr addr_mask; /* 0xfff = 4k translation */ 61 IOMMUAccessFlags perm; 62 }; 63 64 /* 65 * Memory region callbacks 66 */ 67 struct MemoryRegionOps { 68 /* Read from the memory region. @addr is relative to @mr; @size is 69 * in bytes. */ 70 uint64_t (*read)(void *opaque, 71 hwaddr addr, 72 unsigned size); 73 /* Write to the memory region. @addr is relative to @mr; @size is 74 * in bytes. 
 */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};

typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;

struct MemoryRegionIOMMUOps {
    /* Return a TLB entry that contains a given address.  */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr);
};

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    const MemoryRegionIOMMUOps *iommu_ops;
    void *opaque;
    struct Object *owner;
    MemoryRegion *parent;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    ram_addr_t ram_addr;
    bool subpage;
    bool terminates;
    bool romd_mode;
    bool ram;
    bool readonly; /* For RAM regions */
    bool enabled;
    bool rom_device;
    bool warning_printed; /* For reservations */
    bool flush_coalesced_mmio;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    NotifierList iommu_notify;
};

typedef struct MemoryListener MemoryListener;

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space_filter;
    QTAILQ_ENTRY(MemoryListener) link;
};

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    char *name;
    MemoryRegion *root;
    struct FlatView *current_map;
    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;

    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram: Initialize RAM memory region.  Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size);

/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer.  Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

/**
 * memory_region_init_rom_device: Initialize a ROM memory region.  Writes are
 *                                handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: the name of the region.
 * @size: size of the region.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_reservation(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size);

/**
 * memory_region_init_iommu: Initialize a memory region that translates
 *                           addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @ops: a function that translates addresses into the @target region
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(MemoryRegion *mr,
                              struct Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_destroy: Destroy a memory region and reclaim all resources.
 *
 * @mr: the region to be destroyed.  May not currently be a subregion
 *      (see memory_region_add_subregion()) or referenced in an alias
 *      (see memory_region_init_alias()).
 */
void memory_region_destroy(MemoryRegion *mr);

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_iommu: check whether a memory region is an iommu
 *
 * Returns %true if a memory region is an iommu.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_iommu(MemoryRegion *mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the notifier to be added; the notifier receives a pointer to an
 *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
 *     valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(Notifier *n);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_logging(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_rom(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 *                                     for a specified client.  It clears them.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data
 *     all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data
 *     all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * DO NOT USE THIS FUNCTION.  This is a temporary workaround while the Xen
 * code is being reworked.
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its parent.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to parent region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() has changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks whether an address relative to @parent is
 * covered by a #MemoryRegion within @parent.
 *
 * Answer whether a #MemoryRegion within @parent covers the address
 * @addr.
 *
 * @parent: a MemoryRegion within which @addr is a relative address
 * @addr: the area within @parent to be searched
 */
bool memory_region_present(MemoryRegion *parent, hwaddr addr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no parent (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for an entire address space.
 *
 * @as: the address space that contains the memory being synchronized
 */
void address_space_sync_dirty_bitmap(AddressSpace *as);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
902 */ 903 void memory_region_transaction_commit(void); 904 905 /** 906 * memory_listener_register: register callbacks to be called when memory 907 * sections are mapped or unmapped into an address 908 * space 909 * 910 * @listener: an object containing the callbacks to be called 911 * @filter: if non-%NULL, only regions in this address space will be observed 912 */ 913 void memory_listener_register(MemoryListener *listener, AddressSpace *filter); 914 915 /** 916 * memory_listener_unregister: undo the effect of memory_listener_register() 917 * 918 * @listener: an object containing the callbacks to be removed 919 */ 920 void memory_listener_unregister(MemoryListener *listener); 921 922 /** 923 * memory_global_dirty_log_start: begin dirty logging for all regions 924 */ 925 void memory_global_dirty_log_start(void); 926 927 /** 928 * memory_global_dirty_log_stop: end dirty logging for all regions 929 */ 930 void memory_global_dirty_log_stop(void); 931 932 void mtree_info(fprintf_function mon_printf, void *f); 933 934 /** 935 * address_space_init: initializes an address space 936 * 937 * @as: an uninitialized #AddressSpace 938 * @root: a #MemoryRegion that routes addresses for the address space 939 * @name: an address space name. The name is only used for debugging 940 * output. 941 */ 942 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name); 943 944 945 /** 946 * address_space_destroy: destroy an address space 947 * 948 * Releases all resources associated with an address space. After an address space 949 * is destroyed, its root memory region (given by address_space_init()) may be destroyed 950 * as well. 951 * 952 * @as: address space to be destroyed 953 */ 954 void address_space_destroy(AddressSpace *as); 955 956 /** 957 * address_space_rw: read from or write to an address space. 958 * 959 * Return true if the operation hit any unassigned memory or encountered an 960 * IOMMU fault.
961 * 962 * @as: #AddressSpace to be accessed 963 * @addr: address within that address space 964 * @buf: buffer with the data transferred * @len: the number of bytes to transfer 965 * @is_write: indicates the transfer direction 966 */ 967 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, 968 int len, bool is_write); 969 970 /** 971 * address_space_write: write to address space. 972 * 973 * Return true if the operation hit any unassigned memory or encountered an 974 * IOMMU fault. 975 * 976 * @as: #AddressSpace to be accessed 977 * @addr: address within that address space 978 * @buf: buffer with the data transferred * @len: the number of bytes to write 979 */ 980 bool address_space_write(AddressSpace *as, hwaddr addr, 981 const uint8_t *buf, int len); 982 983 /** 984 * address_space_read: read from an address space. 985 * 986 * Return true if the operation hit any unassigned memory or encountered an 987 * IOMMU fault. 988 * 989 * @as: #AddressSpace to be accessed 990 * @addr: address within that address space 991 * @buf: buffer with the data transferred * @len: the number of bytes to read 992 */ 993 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len); 994 995 /* address_space_translate: translate an address range into an address space 996 * into a MemoryRegion and an address range into that section 997 * 998 * @as: #AddressSpace to be accessed 999 * @addr: address within that address space 1000 * @xlat: pointer to address within the returned memory region section's 1001 * #MemoryRegion. 1002 * @len: pointer to length 1003 * @is_write: indicates the transfer direction 1004 */ 1005 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, 1006 hwaddr *xlat, hwaddr *len, 1007 bool is_write); 1008 1009 /* address_space_access_valid: check for validity of accessing an address 1010 * space range 1011 * 1012 * Check whether memory is assigned to the given address space range, and 1013 * access is permitted by any IOMMU regions that are active for the address 1014 * space.
1015 * 1016 * For now, addr and len should be aligned to a page size. This limitation 1017 * will be lifted in the future. 1018 * 1019 * @as: #AddressSpace to be accessed 1020 * @addr: address within that address space 1021 * @len: length of the area to be checked 1022 * @is_write: indicates the transfer direction 1023 */ 1024 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write); 1025 1026 /* address_space_map: map a physical memory region into a host virtual address 1027 * 1028 * May map a subset of the requested range, given by and returned in @plen. 1029 * May return %NULL if resources needed to perform the mapping are exhausted. 1030 * Use only for reads OR writes - not for read-modify-write operations. 1031 * Use cpu_register_map_client() to know when retrying the map operation is 1032 * likely to succeed. 1033 * 1034 * @as: #AddressSpace to be accessed 1035 * @addr: address within that address space 1036 * @plen: pointer to length of buffer; updated on return 1037 * @is_write: indicates the transfer direction 1038 */ 1039 void *address_space_map(AddressSpace *as, hwaddr addr, 1040 hwaddr *plen, bool is_write); 1041 1042 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map() 1043 * 1044 * Will also mark the memory as dirty if @is_write == %true. @access_len gives 1045 * the amount of memory that was actually read or written by the caller. 1046 * 1047 * @as: #AddressSpace used 1048 * @buffer: host pointer as returned by address_space_map() 1049 * @len: buffer length as returned by address_space_map() 1050 * @access_len: amount of data actually transferred 1051 * @is_write: indicates the transfer direction 1052 */ 1053 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 1054 int is_write, hwaddr access_len); 1055 1056 1057 #endif 1058 1059 #endif 1060