xref: /openbmc/qemu/include/exec/memory-internal.h (revision e1fe50dc)
/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

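/*
 * One node of the multi-level phys_map used by the dispatch code below:
 * a leaf entry indexes phys_sections, an interior entry indexes
 * phys_map_nodes.
 */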
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
};

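/* Build and tear down the dispatch map (and its MemoryListener) for an
 * address space.
 */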
void address_space_init_dispatch(AddressSpace *as);
void address_space_destroy_dispatch(AddressSpace *as);

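/* RAM block management: allocate guest RAM for @mr (optionally wrapping a
 * caller-provided host pointer) and return its ram_addr_t offset, or free a
 * previously allocated block.
 */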
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

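/* Dirty memory tracking: every guest page has one byte in
 * ram_list.phys_dirty, and each bit below records that the page is dirty
 * for one client (VGA framebuffer updates, TCG code pages, migration).
 */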
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

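/*
 * Illustrative usage only (not part of this header): mark a page dirty for
 * migration, then test it with the range helper declared below:
 *
 *     cpu_physical_memory_set_dirty_flags(addr, MIGRATION_DIRTY_FLAG);
 *     if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
 *                                       MIGRATION_DIRTY_FLAG)) {
 *         ... page still needs to be sent ...
 *     }
 */

/* Return the raw dirty byte for the page containing @addr. */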
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

/* Return 1 if the page's dirty byte has every bit set (the state written by
 * cpu_physical_memory_set_dirty below), 0 otherwise.
 */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
}

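/* Return nonzero if any page overlapping [start, start + length) has any of
 * @dirty_flags set.
 */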
static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                int dirty_flags)
{
    int ret = 0;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
    }
    return ret;
}

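/* OR @dirty_flags into the page's dirty byte and return the updated byte. */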
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

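/* Mark the page containing @addr dirty for all clients. */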
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    cpu_physical_memory_set_dirty_flags(addr, 0xff);
}

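/* Clear @dirty_flags in the page's dirty byte and return what remains. */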
static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
                                                        int dirty_flags)
{
    int mask = ~dirty_flags;

    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
}

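/* Set @dirty_flags on every page overlapping [start, start + length) and
 * notify Xen of the modified range.
 */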
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
    }
    /* Pass the aligned start of the range: after the loop, addr has already
     * advanced past the end and would describe the wrong pages to Xen.
     */
    xen_modified_memory(start, length);
}

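/* Clear @dirty_flags on every page overlapping [start, start + length). */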
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
    }
}

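/* Defined in exec.c: clear @dirty_flags on [start, end) and propagate the
 * change to the CPU TLBs so the pages fault dirty again.
 */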
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);

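/* Defined in memory.c: IORange callbacks for port I/O regions dispatched
 * through the memory API.
 */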
extern const IORangeOps memory_region_iorange_ops;

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_INTERNAL_H */