/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "cpu.h"
#include "sysemu/xen.h"
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"
#include "exec/exec-all.h"
#include "qemu/rcu.h"

extern uint64_t total_dirty_pages;

/**
 * clear_bmap_size: calculate clear bitmap size
 *
 * @pages: number of guest pages
 * @shift: guest page number shift
 *
 * Returns: number of bits for the clear bitmap
 */
static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
{
    return DIV_ROUND_UP(pages, 1UL << shift);
}
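
/*
 * Worked example (a sketch; assumes a 4 KiB target page and the migration
 * code's default clear_bmap_shift of 18): a 4 GiB guest has 1048576 pages,
 * so clear_bmap_size(1048576, 18) == DIV_ROUND_UP(1048576, 262144) == 4,
 * i.e. each clear-bitmap bit covers a 1 GiB chunk of guest memory.
 */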

/**
 * clear_bmap_set: set clear bitmap for the page range.  Must be called
 * with bitmap_mutex held.
 *
 * @rb: the ramblock to operate on
 * @start: the start page number
 * @npages: number of pages to set in the bitmap
 *
 * Returns: None
 */
static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
                                  uint64_t npages)
{
    uint8_t shift = rb->clear_bmap_shift;

    bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
}

/**
 * clear_bmap_test_and_clear: test clear bitmap for the page; clear the bit
 * if it was set.  Must be called with bitmap_mutex held.
 *
 * @rb: the ramblock to operate on
 * @page: the page number to check
 *
 * Returns: true if the bit was set, false otherwise
 */
static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
    uint8_t shift = rb->clear_bmap_shift;

    return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
}
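
/*
 * Usage sketch (hypothetical caller, modeled on migration's lazy dirty-log
 * clearing; names other than the two helpers above are assumptions): bits
 * are set in bulk at sync time and consumed one page at a time right
 * before sending, so the expensive per-chunk log-clear happens lazily:
 *
 *     uint64_t size = 1ULL << (TARGET_PAGE_BITS + rb->clear_bmap_shift);
 *     ram_addr_t start = QEMU_ALIGN_DOWN(page << TARGET_PAGE_BITS, size);
 *
 *     if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
 *         memory_region_clear_dirty_bitmap(rb->mr, start, size);
 *     }
 */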

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
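
/*
 * A minimal sketch of how the two helpers above pair up (hypothetical
 * caller; data and len are assumptions): validate the offset first, then
 * translate it into a host pointer:
 *
 *     if (offset_in_ramblock(block, offset)) {
 *         memcpy(ramblock_ptr(block, offset), data, len);
 *     }
 */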

static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}

bool ramblock_is_pmem(RAMBlock *rb);

long qemu_minrampagesize(void);
long qemu_maxrampagesize(void);

/**
 * qemu_ram_alloc_from_file,
 * qemu_ram_alloc_from_fd:  Allocate a ram block from the specified backing
 *                          file or device
 *
 * Parameters:
 *  @size: the size in bytes of the ram block
 *  @mr: the memory region where the ram block is
 *  @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *              RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *              RAM_READONLY_FD, RAM_GUEST_MEMFD
 *  @mem_path or @fd: specify the backing file or device
 *  @offset: Offset into target file
 *  @errp: pointer to Error*, to store an error if it happens
 *
 * Return:
 *  On success, return a pointer to the ram block.
 *  On failure, return NULL.
 */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   uint32_t ram_flags, const char *mem_path,
                                   off_t offset, Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd, off_t offset,
                                 Error **errp);
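
/*
 * Usage sketch (hypothetical backing path; a RAM_SHARED mapping of a DAX
 * device, with errors reported the usual Error ** way):
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, RAM_SHARED,
 *                                             "/dev/dax0.0", 0, &err);
 *     if (!rb) {
 *         error_report_err(err);
 *     }
 */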

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);

/* Write back (msync) the whole block of memory to its backing store */
static inline void qemu_ram_block_writeback(RAMBlock *block)
{
    qemu_ram_msync(block, 0, block->used_length);
}

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

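/*
 * The client mask is a bitmap over DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE and
 * DIRTY_MEMORY_MIGRATION.  For example (a sketch; addr and len are
 * hypothetical), a device-style DMA write would dirty everything except
 * the TCG code client:
 *
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_NOCODE);
 */
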
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
            unsigned long num = next - base;
            unsigned long found = find_next_bit(blocks->blocks[idx],
                                                num, offset);
            if (found < num) {
                dirty = true;
                break;
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    return dirty;
}
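
/*
 * Example query (a sketch; fb_addr, fb_len and redraw() are hypothetical):
 * a display front end can test whether a framebuffer range still has pages
 * the VGA client considers dirty before redrawing:
 *
 *     if (cpu_physical_memory_get_dirty(fb_addr, fb_len, DIRTY_MEMORY_VGA)) {
 *         redraw(fb_addr, fb_len);
 *     }
 */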

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num,
                                                 offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
        }

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                                  offset, next - page);
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    xen_hvm_modified_memory(start, length);
}

#if !defined(_WIN32)

/*
 * Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns
 * the number of dirty pages in @bitmap passed as argument. On the other hand,
 * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
 * weren't set in the global migration bitmap.
 */
static inline
uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                ram_addr_t start,
                                                ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c, nbits;
    hwaddr addr;
    ram_addr_t ram_addr;
    uint64_t num_dirty = 0;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        WITH_RCU_READ_LOCK_GUARD() {
            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
                blocks[i] =
                    qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
            }

            for (k = 0; k < nr; k++) {
                if (bitmap[k]) {
                    unsigned long temp = leul_to_cpu(bitmap[k]);

                    nbits = ctpopl(temp);
                    qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);

                    if (global_dirty_tracking) {
                        qatomic_or(
                                &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
                                temp);
                        if (unlikely(
                            global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
                            total_dirty_pages += nbits;
                        }
                    }

                    num_dirty += nbits;

                    if (tcg_enabled()) {
                        qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
                                   temp);
                    }
                }

                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                    offset = 0;
                    idx++;
                }
            }
        }

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;

        if (!global_dirty_tracking) {
            clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
        }

        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                nbits = ctpopl(c);
                if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
                    total_dirty_pages += nbits;
                }
                num_dirty += nbits;
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }

    return num_dirty;
}
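
/*
 * Usage sketch (hypothetical values; lebitmap and slot_start are
 * assumptions): an accelerator's dirty-log sync can hand its little-endian,
 * one-bit-per-page bitmap straight to this helper, e.g. for a 1 GiB slot:
 *
 *     ram_addr_t slot_pages = (1ULL << 30) >> TARGET_PAGE_BITS;
 *     uint64_t dirtied = cpu_physical_memory_set_dirty_lebitmap(lebitmap,
 *                                                               slot_start,
 *                                                               slot_pages);
 */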
#endif /* not _WIN32 */

static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start,
                                                          ram_addr_t length)
{
    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

/* Called with RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* are the start address and length aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        src = qatomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }
        if (num_dirty) {
            cpu_physical_memory_dirty_bits_cleared(start, length);
        }

        if (rb->clear_bmap) {
            /*
             * Postpone the dirty bitmap clear to the point before we
             * really send the pages; also we will split the clear
             * dirty procedure into smaller chunks.
             */
            clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
                           length >> TARGET_PAGE_BITS);
        } else {
            /* Slow path - still do that in a huge chunk */
            memory_region_clear_dirty_bitmap(rb->mr, start, length);
        }
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
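
/*
 * Usage sketch (rb is a hypothetical RAMBlock): migration periodically
 * folds the global migration dirty bits into the per-RAMBlock bitmap and
 * counts what became dirty since the previous sync:
 *
 *     uint64_t new_pages;
 *
 *     WITH_RCU_READ_LOCK_GUARD() {
 *         new_pages = cpu_physical_memory_sync_dirty_bitmap(rb, 0,
 *                                                           rb->used_length);
 *     }
 */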
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */