/*
 * L2/refcount table cache for the QCOW2 format
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "block/block_int.h"
#include "qemu-common.h"
#include "qcow2.h"
#include "trace.h"

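/*
 * One cache entry: a cluster-sized buffer holding a single L2 or refcount
 * block table, the image offset it belongs to, a dirty flag, a hit counter
 * used by the replacement policy, and a reference count held while callers
 * are using the table.
 */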
typedef struct Qcow2CachedTable {
    void*   table;
    int64_t offset;
    bool    dirty;
    int     cache_hits;
    int     ref;
} Qcow2CachedTable;

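/*
 * A table cache: a fixed-size array of entries, plus an optional dependency
 * on another cache (or on a flush of bs->file) that has to be written out
 * first so that metadata updates reach the image in a safe order.
 */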
struct Qcow2Cache {
    Qcow2CachedTable*       entries;
    struct Qcow2Cache*      depends;
    int                     size;
    bool                    depends_on_flush;
};

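/* Allocate a cache with room for @num_tables tables, each one cluster big */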
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2Cache *c;
    int i;

    c = g_malloc0(sizeof(*c));
    c->size = num_tables;
    c->entries = g_malloc0(sizeof(*c->entries) * num_tables);

    for (i = 0; i < c->size; i++) {
        c->entries[i].table = qemu_blockalign(bs, s->cluster_size);
    }

    return c;
}

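/* Free all table buffers and the cache itself; every entry must be
 * unreferenced. Dirty entries are not written back here. */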
int qcow2_cache_destroy(BlockDriverState* bs, Qcow2Cache *c)
{
    int i;

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        qemu_vfree(c->entries[i].table);
    }

    g_free(c->entries);
    g_free(c);

    return 0;
}

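/* Flush the cache that @c depends on and clear the dependency afterwards */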
static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret;

    ret = qcow2_cache_flush(bs, c->depends);
    if (ret < 0) {
        return ret;
    }

    c->depends = NULL;
    c->depends_on_flush = false;

    return 0;
}

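/*
 * Write a single dirty table back to the image file. Any flush dependency is
 * honoured first and the write is preceded by a metadata overlap check;
 * clean or unused entries are a no-op.
 */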
static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
{
    BDRVQcowState *s = bs->opaque;
    int ret = 0;

    if (!c->entries[i].dirty || !c->entries[i].offset) {
        return 0;
    }

    trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
                                  c == s->l2_table_cache, i);

    if (c->depends) {
        ret = qcow2_cache_flush_dependency(bs, c);
    } else if (c->depends_on_flush) {
        ret = bdrv_flush(bs->file);
        if (ret >= 0) {
            c->depends_on_flush = false;
        }
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_REFCOUNT_BLOCK,
                c->entries[i].offset, s->cluster_size);
    } else if (c == s->l2_table_cache) {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                c->entries[i].offset, s->cluster_size);
    } else {
        ret = qcow2_pre_write_overlap_check(bs, 0,
                c->entries[i].offset, s->cluster_size);
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
    } else if (c == s->l2_table_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
    }

    ret = bdrv_pwrite(bs->file, c->entries[i].offset, c->entries[i].table,
        s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    c->entries[i].dirty = false;

    return 0;
}

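/*
 * Write back all dirty tables of the cache and then flush the image file.
 * If any entry fails with -ENOSPC, that error is returned in preference to
 * other errors.
 */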
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
{
    BDRVQcowState *s = bs->opaque;
    int result = 0;
    int ret;
    int i;

    trace_qcow2_cache_flush(qemu_coroutine_self(), c == s->l2_table_cache);

    for (i = 0; i < c->size; i++) {
        ret = qcow2_cache_entry_flush(bs, c, i);
        if (ret < 0 && result != -ENOSPC) {
            result = ret;
        }
    }

    if (result == 0) {
        ret = bdrv_flush(bs->file);
        if (ret < 0) {
            result = ret;
        }
    }

    return result;
}

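/*
 * Make @c depend on @dependency: no table of @c may be written back before
 * @dependency has been flushed. Chained or conflicting dependencies are
 * resolved by flushing right away.
 */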
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
    Qcow2Cache *dependency)
{
    int ret;

    if (dependency->depends) {
        ret = qcow2_cache_flush_dependency(bs, dependency);
        if (ret < 0) {
            return ret;
        }
    }

    if (c->depends && (c->depends != dependency)) {
        ret = qcow2_cache_flush_dependency(bs, c);
        if (ret < 0) {
            return ret;
        }
    }

    c->depends = dependency;
    return 0;
}

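/* Require a flush of the image file before the next table of @c is written */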
void qcow2_cache_depends_on_flush(Qcow2Cache *c)
{
    c->depends_on_flush = true;
}

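/* Write back all dirty tables, then drop every (unreferenced) cache entry */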
int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret, i;

    ret = qcow2_cache_flush(bs, c);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        c->entries[i].offset = 0;
        c->entries[i].cache_hits = 0;
    }

    return 0;
}

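/*
 * Pick an unreferenced entry to evict: the one with the fewest cache hits.
 * Hit counters are halved on every scan so that recent accesses carry more
 * weight than old ones.
 */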
static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
{
    int i;
    int min_count = INT_MAX;
    int min_index = -1;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].ref) {
            continue;
        }

        if (c->entries[i].cache_hits < min_count) {
            min_index = i;
            min_count = c->entries[i].cache_hits;
        }

        /* Give newer hits priority */
        /* TODO Check how to optimize the replacement strategy */
        c->entries[i].cache_hits /= 2;
    }

    if (min_index == -1) {
        /* This can't happen in current synchronous code, but leave the check
         * here as a reminder for whoever starts using AIO with the cache */
        abort();
    }
    return min_index;
}

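/*
 * Look up the table at @offset, evicting and (optionally) reading in a table
 * if it is not cached yet. Returns the table buffer in *table with its
 * reference count increased; the caller releases it with qcow2_cache_put().
 */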
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    uint64_t offset, void **table, bool read_from_disk)
{
    BDRVQcowState *s = bs->opaque;
    int i;
    int ret;

    trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache,
                          offset, read_from_disk);

    /* Check if the table is already cached */
    for (i = 0; i < c->size; i++) {
        if (c->entries[i].offset == offset) {
            goto found;
        }
    }

    /* If not, write a table back and replace it */
    i = qcow2_cache_find_entry_to_replace(c);
    trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
                                        c == s->l2_table_cache, i);
    if (i < 0) {
        return i;
    }

    ret = qcow2_cache_entry_flush(bs, c, i);
    if (ret < 0) {
        return ret;
    }

    trace_qcow2_cache_get_read(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);
    c->entries[i].offset = 0;
    if (read_from_disk) {
        if (c == s->l2_table_cache) {
            BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
        }

        ret = bdrv_pread(bs->file, offset, c->entries[i].table, s->cluster_size);
        if (ret < 0) {
            return ret;
        }
    }

    /* Give the table some hits for the start so that it won't be replaced
     * immediately. The number 32 is completely arbitrary. */
    c->entries[i].cache_hits = 32;
    c->entries[i].offset = offset;

    /* And return the right table */
found:
    c->entries[i].cache_hits++;
    c->entries[i].ref++;
    *table = c->entries[i].table;

    trace_qcow2_cache_get_done(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);

    return 0;
}

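/* Get a table, reading it from the image file if it is not cached yet */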
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, true);
}

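/* Get a table buffer for @offset without reading it from the image file;
 * the caller is expected to initialise the contents itself */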
int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, false);
}

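/* Release a reference obtained with qcow2_cache_get(); the table stays
 * cached. Returns -ENOENT if *table does not belong to this cache. */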
int qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == *table) {
            goto found;
        }
    }
    return -ENOENT;

found:
    c->entries[i].ref--;
    *table = NULL;

    assert(c->entries[i].ref >= 0);
    return 0;
}

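/* Mark a cached table as dirty so that the next flush writes it back;
 * aborts if the table does not belong to this cache */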
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == table) {
            goto found;
        }
    }
    abort();

found:
    c->entries[i].dirty = true;
}