/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "exec/cpu-common.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

struct IOMMUTLBEntry {
    AddressSpace     *target_as;
    hwaddr            iova;
    hwaddr            translated_addr;
    hwaddr            addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags  perm;
};

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
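
/*
 * Example (illustrative sketch, not part of the API): a minimal
 * MemoryRegionOps for a hypothetical device exposing a few 32-bit
 * registers.  The names "mydev_*" and the MyDevState type are invented
 * for this example; the endianness value comes from enum device_endian.
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       return s->regs[addr >> 2];
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                           unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,
 *       .valid.max_access_size = 4,
 *       .impl.min_access_size = 4,
 *       .impl.max_access_size = 4,
 *   };
 */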

typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;

struct MemoryRegionIOMMUOps {
    /* Return a TLB entry that contains a given address. */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
};

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    const MemoryRegionIOMMUOps *iommu_ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    ram_addr_t ram_addr;
    uint64_t align;
    bool subpage;
    bool terminates;
    bool romd_mode;
    bool ram;
    bool skip_dump;
    bool readonly; /* For RAM regions */
    bool enabled;
    bool rom_device;
    bool warning_printed; /* For reservations */
    bool flush_coalesced_mmio;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    NotifierList iommu_notify;
};

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space_filter;
    QTAILQ_ENTRY(MemoryListener) link;
};
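
/*
 * Example (illustrative sketch, not part of the API): a minimal listener
 * that is told whenever sections are mapped into or unmapped from an
 * address space.  The names "my_region_add", "my_region_del" and
 * "my_listener" are invented; passing &address_space_memory as the filter
 * is an assumption of this sketch.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       printf("mapped: %s\n", memory_region_name(section->mr));
 *   }
 *
 *   static void my_region_del(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       printf("unmapped: %s\n", memory_region_name(section->mr));
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .region_del = my_region_del,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, &address_space_memory);
 */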

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;

    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
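
/*
 * Example (illustrative sketch): wiring the mydev_ops structure from the
 * MemoryRegionOps example above to a 4KiB MMIO region.  MyDevState and its
 * "iomem" field are invented for this example; the owner is typically the
 * device's QOM object.
 *
 *   MyDevState *s = ...;
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-mmio", 0x1000);
 *
 * The region can then be mapped with memory_region_add_subregion() or
 * exposed as a bus-specific MMIO window by the relevant bus code.
 */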

/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef __linux__
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
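
/*
 * Example (illustrative sketch): exposing the first megabyte of a RAM
 * region a second time through an alias.  "ram", "ram_below_1m" and
 * "sysmem" are names invented for this example; an alias shares the
 * underlying storage of @orig, so writes through either mapping are
 * visible through the other.
 *
 *   MemoryRegion ram, ram_below_1m;
 *
 *   memory_region_init_ram(&ram, NULL, "ram", 128 * 1024 * 1024,
 *                          &error_abort);
 *   memory_region_init_alias(&ram_below_1m, NULL, "ram-below-1m",
 *                            &ram, 0, 1024 * 1024);
 *   memory_region_add_subregion(sysmem, 0, &ram);
 *   memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_below_1m);
 */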

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 *                                 handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling.
 * @opaque: passed to the callbacks in @ops.
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_reservation(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size);

/**
 * memory_region_init_iommu: Initialize a memory region that translates
 *                           addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @ops: a function that translates addresses into the target region
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(MemoryRegion *mr,
                              struct Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size);
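
/*
 * Example (illustrative sketch): a trivial IOMMU that maps every I/O
 * virtual address to the same address in the system address space, page by
 * page.  "my_iommu_translate", "my_iommu_ops", the "iommu_mr" field and the
 * use of &address_space_memory are assumptions of this sketch.
 *
 *   static IOMMUTLBEntry my_iommu_translate(MemoryRegion *iommu, hwaddr addr,
 *                                           bool is_write)
 *   {
 *       IOMMUTLBEntry entry = {
 *           .target_as = &address_space_memory,
 *           .iova = addr & ~(hwaddr)0xfff,
 *           .translated_addr = addr & ~(hwaddr)0xfff,
 *           .addr_mask = 0xfff,
 *           .perm = IOMMU_RW,
 *       };
 *       return entry;
 *   }
 *
 *   static const MemoryRegionIOMMUOps my_iommu_ops = {
 *       .translate = my_iommu_translate,
 *   };
 *
 *   memory_region_init_iommu(&s->iommu_mr, OBJECT(s), &my_iommu_ops,
 *                            "my-iommu", UINT64_MAX);
 */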

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram(MemoryRegion *mr);

/**
 * memory_region_is_skip_dump: check whether a memory region should not be
 *                             dumped
 *
 * Returns %true if a memory region should not be dumped (e.g. VFIO BAR MMAP).
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_skip_dump(MemoryRegion *mr);

/**
 * memory_region_set_skip_dump: Set skip_dump flag, dump will ignore this memory
 *                              region
 *
 * @mr: the memory region being queried
 */
void memory_region_set_skip_dump(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_iommu: check whether a memory region is an iommu
 *
 * Returns %true if a memory region is an iommu.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_iommu(MemoryRegion *mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the notifier to be added; the notifier receives a pointer to an
 *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
 *     valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(Notifier *n);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_logging(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_rom(MemoryRegion *mr);

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
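
/*
 * Example (illustrative sketch): a display device scanning its VRAM region
 * for pages that changed since the last refresh.  "vram", "page_size" and
 * the redraw_page() helper are assumptions of this sketch.
 *
 *   memory_region_set_log(&vram, true, DIRTY_MEMORY_VGA);
 *   ...
 *   memory_region_sync_dirty_bitmap(&vram);
 *   for (hwaddr off = 0; off < memory_region_size(&vram); off += page_size) {
 *       if (memory_region_get_dirty(&vram, off, page_size, DIRTY_MEMORY_VGA)) {
 *           redraw_page(off);
 *       }
 *   }
 *   memory_region_reset_dirty(&vram, 0, memory_region_size(&vram),
 *                             DIRTY_MEMORY_VGA);
 */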

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 *                                     for a specified client, and clear it.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client);
/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
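
/*
 * Example (illustrative sketch): a flash-like device that leaves ROMD mode
 * while a programming command sequence is in progress, so that reads go
 * through its MemoryRegionOps, and re-enters ROMD mode once the device is
 * back in its read-array state.  FlashState, CMD_READ_ARRAY and the "mr"
 * field are invented for this example.
 *
 *   static void flash_write(void *opaque, hwaddr addr, uint64_t value,
 *                           unsigned size)
 *   {
 *       FlashState *s = opaque;
 *
 *       if (value == CMD_READ_ARRAY) {
 *           memory_region_rom_device_set_romd(&s->mr, true);
 *       } else {
 *           memory_region_rom_device_set_romd(&s->mr, false);
 *       }
 *       ...
 *   }
 */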

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to the region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all
 *     match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
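
/*
 * Example (illustrative sketch): turning a 2-byte "doorbell" register at
 * offset 0x10 of an MMIO region into an ioeventfd, so that a guest write
 * of value 0 wakes up another thread instead of taking the slow MMIO path.
 * The EventNotifier setup (event_notifier_init() from
 * "qemu/event_notifier.h") and the "iomem" field are assumptions here.
 *
 *   EventNotifier doorbell;
 *
 *   event_notifier_init(&doorbell, 0);
 *   memory_region_add_eventfd(&s->iomem, 0x10, 2, true, 0, &doorbell);
 *
 * The matching memory_region_del_eventfd() call must use exactly the same
 * @addr, @size, @match_data and @data values.
 */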

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all
 *     match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * DO NOT USE THIS FUNCTION.  This is a temporary workaround while the Xen
 * code is being reworked.
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);
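
/*
 * Example (illustrative sketch): layering an MMIO window on top of a RAM
 * region inside one container, with the higher-priority window hiding the
 * RAM underneath it.  "sysmem", "ram" and "pci_window" are invented for
 * this example; sysmem is a container created with memory_region_init().
 *
 *   memory_region_add_subregion_overlap(sysmem, 0, &ram, 0);
 *   memory_region_add_subregion_overlap(sysmem, 0xe0000000, &pci_window, 1);
 *
 * Guest accesses that fall inside pci_window hit the window; everything
 * else falls through to ram.
 */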

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: the new size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() has changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);
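
/*
 * Example (illustrative sketch): looking up which region backs a given
 * address under a root region.  "root_mr" and "addr" are assumed to be in
 * scope; int128_get64() comes from "qemu/int128.h".  Whether the returned
 * region must be released with memory_region_unref() afterwards is an
 * assumption of this sketch.
 *
 *   MemoryRegionSection section = memory_region_find(root_mr, addr, 4);
 *
 *   if (section.mr) {
 *       uint64_t found = int128_get64(section.size);
 *       ... the first 'found' bytes at addr are covered by section.mr ...
 *       memory_region_unref(section.mr);
 *   }
 */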

/**
 * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for an entire address space.
 * @as: the address space that contains the memory being synchronized
 */
void address_space_sync_dirty_bitmap(AddressSpace *as);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(fprintf_function mon_printf, void *f);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);


/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address space
 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
 * as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);
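
/*
 * Example (illustrative sketch): creating a per-device DMA address space
 * rooted at an IOMMU region, and batching several layout changes into a
 * single transaction so the guest sees them atomically.  The "dma_as",
 * "iommu_mr" and "bar_mr" fields and "new_addr" are invented for this
 * example.
 *
 *   address_space_init(&s->dma_as, &s->iommu_mr, "mydev-dma");
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(&s->bar_mr, false);
 *   memory_region_set_address(&s->bar_mr, new_addr);
 *   memory_region_set_enabled(&s->bar_mr, true);
 *   memory_region_transaction_commit();
 */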

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 * @is_write: indicates the transfer direction
 */
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len);

/**
 * address_space_read: read from an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);

/* address_space_translate: translate an address range within an address space
 * into a #MemoryRegion and a range within that region
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *len,
                                      bool is_write);

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);
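
/*
 * Example (illustrative sketch): a DMA-style read that maps guest memory,
 * copies out of it, and unmaps it again.  "as", "addr", "buf" and "len"
 * are assumed to be in scope; falling back to address_space_rw() when the
 * mapping is short or fails is a choice made for this sketch.
 *
 *   hwaddr plen = len;
 *   void *p = address_space_map(as, addr, &plen, false);
 *
 *   if (p && plen == len) {
 *       memcpy(buf, p, len);
 *       address_space_unmap(as, p, plen, false, len);
 *   } else {
 *       if (p) {
 *           address_space_unmap(as, p, plen, false, 0);
 *       }
 *       address_space_rw(as, addr, buf, len, false);
 *   }
 */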

/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);


#endif

#endif