/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "exec/cpu-common.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qapi/error.h"
#include "qom/object.h"

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
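
/*
 * Illustrative sketch (not part of this API): how a device model might fill
 * in a MemoryRegionOps for a small bank of 32-bit registers.  The
 * "ExampleDevRegs" state, the callbacks and the register layout are
 * hypothetical; the constraint fields shown here request that the guest use
 * aligned 32-bit accesses and that the core never call the callbacks with
 * any other size.
 */
typedef struct ExampleDevRegs {
    uint32_t reg[4];                    /* 16 bytes of guest-visible state */
} ExampleDevRegs;

static inline uint64_t example_dev_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    ExampleDevRegs *s = opaque;         /* the @opaque passed at init time */

    return s->reg[(addr & 0xf) >> 2];   /* @addr is relative to the region */
}

static inline void example_dev_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    ExampleDevRegs *s = opaque;

    s->reg[(addr & 0xf) >> 2] = data;
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,           /* guest must use 32-bit accesses */
        .max_access_size = 4,
    },
    .impl = {
        .min_access_size = 4,           /* callbacks only handle 32 bits */
        .max_access_size = 4,
    },
};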

typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;

struct MemoryRegionIOMMUOps {
    /* Return a TLB entry that contains a given address. */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
};
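
/*
 * Illustrative sketch (not part of this API): a trivial translate callback
 * that identity-maps every 4K page into a fixed target address space.  The
 * names are hypothetical, and a real IOMMU would recover its state from
 * @iommu (typically with container_of() once the region is embedded in the
 * device state), walk device-specific page tables, and honour @is_write
 * when choosing permission bits.
 */
static AddressSpace *example_iommu_target_as;   /* set at device init time */

static inline IOMMUTLBEntry example_iommu_translate(MemoryRegion *iommu,
                                                    hwaddr addr, bool is_write)
{
    IOMMUTLBEntry entry = {
        .target_as = example_iommu_target_as,
        .iova = addr & ~(hwaddr)0xfff,              /* page containing @addr */
        .translated_addr = addr & ~(hwaddr)0xfff,   /* identity mapping */
        .addr_mask = 0xfff,                         /* 4K translation */
        .perm = IOMMU_RW,
    };

    return entry;
}

static const MemoryRegionIOMMUOps example_iommu_ops = {
    .translate = example_iommu_translate,
};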

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    const MemoryRegionIOMMUOps *iommu_ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    ram_addr_t ram_addr;
    bool subpage;
    bool terminates;
    bool romd_mode;
    bool ram;
    bool skip_dump;
    bool readonly; /* For RAM regions */
    bool enabled;
    bool rom_device;
    bool warning_printed; /* For reservations */
    bool flush_coalesced_mmio;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    NotifierList iommu_notify;
};

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space_filter;
    QTAILQ_ENTRY(MemoryListener) link;
};

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    char *name;
    MemoryRegion *root;
    struct FlatView *current_map;
    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;

    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
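
/*
 * Illustrative sketch (not part of this API), continuing the hypothetical
 * example_dev_ops/ExampleDevRegs sketch above: a device init helper that
 * exposes the register bank as a 16-byte MMIO region.  A NULL owner is
 * acceptable only for regions that can never disappear (see
 * memory_region_ref() above); real devices normally pass their own QOM
 * object.
 */
typedef struct ExampleDevice {
    ExampleDevRegs regs;
    MemoryRegion mmio;
} ExampleDevice;

static inline void example_device_init_mmio(ExampleDevice *dev,
                                            struct Object *owner)
{
    memory_region_init_io(&dev->mmio, owner, &example_dev_ops, &dev->regs,
                          "example-dev-mmio", 16);
}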

/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

#ifdef __linux__
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 *                                 handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);
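
/*
 * Illustrative sketch (not part of this API): allocate 128 KiB of RAM and
 * expose its upper 64 KiB a second time through an alias, one way of
 * modelling a mirrored or banked window.  The names and sizes are
 * hypothetical and error handling is omitted for brevity.
 */
static inline void example_init_mirrored_ram(MemoryRegion *ram,
                                             MemoryRegion *mirror,
                                             struct Object *owner)
{
    Error *err = NULL;      /* a real caller would check and propagate this */

    memory_region_init_ram(ram, owner, "example-ram", 128 * 1024, &err);

    /* @mirror covers bytes 64K..128K-1 of @ram; mapping both regions makes
     * the same host memory visible at two guest-physical locations. */
    memory_region_init_alias(mirror, owner, "example-ram-mirror",
                             ram, 64 * 1024, 64 * 1024);
}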

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_reservation(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size);

/**
 * memory_region_init_iommu: Initialize a memory region that translates
 *                           addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @ops: a function that translates addresses into the @target region
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(MemoryRegion *mr,
                              struct Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram(MemoryRegion *mr);

/**
 * memory_region_is_skip_dump: check whether a memory region should not be
 *                             dumped
 *
 * Returns %true if a memory region should not be dumped (e.g. VFIO BAR MMAP).
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_skip_dump(MemoryRegion *mr);

/**
 * memory_region_set_skip_dump: Set the skip_dump flag; dumps will ignore this
 *                              memory region
 *
 * @mr: the memory region being updated
 */
void memory_region_set_skip_dump(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_iommu: check whether a memory region is an iommu
 *
 * Returns %true if a memory region is an iommu.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_iommu(MemoryRegion *mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);
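
/*
 * Illustrative sketch (not part of this API), continuing the hypothetical
 * example_iommu_ops sketch above: create the IOMMU region and later
 * invalidate a single 4K mapping.  Per the documentation above, a deleted
 * entry is signalled with .perm == 0 (IOMMU_NONE); registered notifiers
 * (see below) receive the entry.  The UINT64_MAX size and all names are
 * assumptions of this sketch.
 */
static inline void example_iommu_setup(MemoryRegion *iommu_mr,
                                       struct Object *owner)
{
    memory_region_init_iommu(iommu_mr, owner, &example_iommu_ops,
                             "example-iommu", UINT64_MAX);
}

static inline void example_iommu_invalidate_page(MemoryRegion *iommu_mr,
                                                 hwaddr iova)
{
    IOMMUTLBEntry entry = {
        .target_as = NULL,
        .iova = iova & ~(hwaddr)0xfff,
        .translated_addr = 0,
        .addr_mask = 0xfff,
        .perm = IOMMU_NONE,             /* "deleted entry" per the doc above */
    };

    memory_region_notify_iommu(iommu_mr, entry);
}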

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the notifier to be added; the notifier receives a pointer to an
 *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
 *     valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(Notifier *n);
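
/*
 * Illustrative sketch (not part of this API): observing translation changes.
 * The notifier callback receives the updated #IOMMUTLBEntry as @data, valid
 * only for the duration of the call.  The function names are hypothetical.
 */
static inline void example_iommu_map_notify(Notifier *n, void *data)
{
    IOMMUTLBEntry *entry = data;

    if (entry->perm == IOMMU_NONE) {
        /* mapping for entry->iova .. entry->iova + entry->addr_mask removed */
    } else {
        /* new or updated mapping; entry->translated_addr is now valid */
    }
}

static inline void example_watch_iommu(MemoryRegion *iommu_mr, Notifier *n)
{
    n->notify = example_iommu_map_notify;
    memory_region_register_iommu_notifier(iommu_mr, n);
}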

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_logging(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_rom(MemoryRegion *mr);

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 *                                     for a specified client, and clear it.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);
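
/*
 * Illustrative sketch (not part of this API): the refresh loop a display
 * device might run to find framebuffer pages the guest has touched.  It
 * assumes dirty logging was enabled earlier with
 * memory_region_set_log(fb, true, DIRTY_MEMORY_VGA).  The 4 KiB granularity
 * and the function name are hypothetical; memory_region_test_and_clear_dirty()
 * could replace the get/reset pair.
 */
static inline void example_vga_refresh(MemoryRegion *fb, uint64_t fb_size)
{
    hwaddr offset;

    /* Pull the latest dirty information out of the accelerator (e.g. KVM). */
    memory_region_sync_dirty_bitmap(fb);

    for (offset = 0; offset < fb_size; offset += 4096) {
        if (memory_region_get_dirty(fb, offset, 4096, DIRTY_MEMORY_VGA)) {
            /* ... redraw the scanlines backed by this 4 KiB chunk ... */
            memory_region_reset_dirty(fb, offset, 4096, DIRTY_MEMORY_VGA);
        }
    }
}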

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to the region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
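
/*
 * Illustrative sketch (not part of this API): wiring a doorbell register to
 * an eventfd so that a backend can be woken without going through the
 * region's ->write callback.  The offset and value are hypothetical, and
 * @e must already have been initialized by the caller.
 */
static inline void example_wire_doorbell(MemoryRegion *mmio, EventNotifier *e)
{
    /* Trigger @e whenever the guest writes the 32-bit value 1 to offset
     * 0x40 of @mmio.  With @match_data set, only that exact value fires
     * the notifier. */
    memory_region_add_eventfd(mmio, 0x40, 4, true, 1, e);
}

static inline void example_unwire_doorbell(MemoryRegion *mmio, EventNotifier *e)
{
    /* Must mirror the add exactly: same address, size, match and data. */
    memory_region_del_eventfd(mmio, 0x40, 4, true, 1, e);
}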

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * DO NOT USE THIS FUNCTION.  This is a temporary workaround while the Xen
 * code is being reworked.
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
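
/*
 * Illustrative sketch (not part of this API): building a small memory map.
 * "sysmem" acts as the container; a boot ROM is layered over the start of
 * RAM with a higher priority, so it shadows RAM until someone disables it
 * with memory_region_set_enabled().  All names, offsets and sizes are
 * hypothetical.
 */
static inline void example_build_memory_map(MemoryRegion *sysmem,
                                            MemoryRegion *ram,
                                            MemoryRegion *boot_rom,
                                            MemoryRegion *dev_mmio)
{
    memory_region_init(sysmem, NULL, "example-sysmem", UINT64_MAX);

    /* Non-overlapping mappings take the default priority 0. */
    memory_region_add_subregion(sysmem, 0x00000000, ram);
    memory_region_add_subregion(sysmem, 0x10000000, dev_mmio);

    /* The ROM overlaps RAM; priority 1 makes it win while it is enabled. */
    memory_region_add_subregion_overlap(sysmem, 0x00000000, boot_rom, 1);
}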

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into a #MemoryRegion within @container
 *
 * Answers whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for an entire address space.
 *
 * @as: the address space that contains the memory being synchronized
 */
void address_space_sync_dirty_bitmap(AddressSpace *as);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);
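
/*
 * Illustrative sketch (not part of this API): flipping a bankswitch register.
 * Wrapping the two topology updates in one transaction lets listeners (and
 * the guest) observe a single atomic change rather than an intermediate
 * state where neither bank is mapped.  The region names are hypothetical.
 */
static inline void example_switch_bank(MemoryRegion *bank0,
                                       MemoryRegion *bank1,
                                       bool select_bank1)
{
    memory_region_transaction_begin();
    memory_region_set_enabled(bank0, !select_bank1);
    memory_region_set_enabled(bank1, select_bank1);
    memory_region_transaction_commit();
}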

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(fprintf_function mon_printf, void *f);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address
 * space is destroyed, its root memory region (given by address_space_init())
 * may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to transfer
 * @is_write: indicates the transfer direction
 */
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to transfer
 */
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len);

/**
 * address_space_read: read from an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to transfer
 */
bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
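
/*
 * Illustrative sketch (not part of this API): a device model performing DMA.
 * It copies a guest-physical descriptor into host memory and checks the
 * return value, because the guest may have programmed a bogus address.
 * The descriptor layout and names are hypothetical, and byte-swapping of
 * the fields is omitted.
 */
typedef struct ExampleDescriptor {
    uint64_t buf_addr;
    uint32_t buf_len;
    uint32_t flags;
} ExampleDescriptor;

static inline bool example_fetch_descriptor(AddressSpace *as, hwaddr desc_addr,
                                            ExampleDescriptor *desc)
{
    /* address_space_read() returns true on error (unassigned memory or an
     * IOMMU fault); treat that as an invalid descriptor. */
    if (address_space_read(as, desc_addr, (uint8_t *)desc, sizeof(*desc))) {
        return false;
    }

    return true;
}

static inline void example_write_completion(AddressSpace *as, hwaddr status_addr)
{
    uint32_t status = 1;                /* hypothetical "done" flag */

    address_space_write(as, status_addr, (const uint8_t *)&status,
                        sizeof(status));
}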

/* address_space_translate: translate an address range in an address space
 * into a MemoryRegion and an address range within that region
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *len,
                                      bool is_write);

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);

/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);

#endif

#endif