/*
 * L2/refcount table cache for the QCOW2 format
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "block/block_int.h"
#include "qemu-common.h"
#include "qcow2.h"
#include "trace.h"

typedef struct Qcow2CachedTable {
    void       *table;
    int64_t     offset;
    bool        dirty;
    int         cache_hits;
    int         ref;
} Qcow2CachedTable;

struct Qcow2Cache {
    Qcow2CachedTable   *entries;
    struct Qcow2Cache  *depends;
    int                 size;
    bool                depends_on_flush;
};

Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2Cache *c;
    int i;

    c = g_malloc0(sizeof(*c));
    c->size = num_tables;
    c->entries = g_malloc0(sizeof(*c->entries) * num_tables);

    for (i = 0; i < c->size; i++) {
        c->entries[i].table = qemu_blockalign(bs, s->cluster_size);
    }

    return c;
}

int qcow2_cache_destroy(BlockDriverState *bs, Qcow2Cache *c)
{
    int i;

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        qemu_vfree(c->entries[i].table);
    }

    g_free(c->entries);
    g_free(c);

    return 0;
}

static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret;

    ret = qcow2_cache_flush(bs, c->depends);
    if (ret < 0) {
        return ret;
    }

    c->depends = NULL;
    c->depends_on_flush = false;

    return 0;
}

static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
{
    BDRVQcowState *s = bs->opaque;
    int ret = 0;

    if (!c->entries[i].dirty || !c->entries[i].offset) {
        return 0;
    }

    trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
                                  c == s->l2_table_cache, i);

    if (c->depends) {
        ret = qcow2_cache_flush_dependency(bs, c);
    } else if (c->depends_on_flush) {
        ret = bdrv_flush(bs->file);
        if (ret >= 0) {
            c->depends_on_flush = false;
        }
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
    } else if (c == s->l2_table_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
    }

    ret = bdrv_pwrite(bs->file, c->entries[i].offset, c->entries[i].table,
                      s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    c->entries[i].dirty = false;

    return 0;
}

int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
{
    BDRVQcowState *s = bs->opaque;
    int result = 0;
    int ret;
    int i;

    trace_qcow2_cache_flush(qemu_coroutine_self(), c == s->l2_table_cache);

    for (i = 0; i < c->size; i++) {
        ret = qcow2_cache_entry_flush(bs, c, i);
        if (ret < 0 && result != -ENOSPC) {
            result = ret;
        }
    }

    if (result == 0) {
        ret = bdrv_flush(bs->file);
        if (ret < 0) {
            result = ret;
        }
    }

    return result;
}

int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
    Qcow2Cache *dependency)
{
    int ret;

    if (dependency->depends) {
        ret = qcow2_cache_flush_dependency(bs, dependency);
        if (ret < 0) {
            return ret;
        }
    }

    if (c->depends && (c->depends != dependency)) {
        ret = qcow2_cache_flush_dependency(bs, c);
        if (ret < 0) {
            return ret;
        }
    }

    c->depends = dependency;
    return 0;
}

void qcow2_cache_depends_on_flush(Qcow2Cache *c)
{
    c->depends_on_flush = true;
}

static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
{
    int i;
    int min_count = INT_MAX;
    int min_index = -1;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].ref) {
            continue;
        }

        if (c->entries[i].cache_hits < min_count) {
            min_index = i;
            min_count = c->entries[i].cache_hits;
        }

        /* Give newer hits priority */
        /* TODO Check how to optimize the replacement strategy */
        c->entries[i].cache_hits /= 2;
    }

    if (min_index == -1) {
        /* This can't happen in current synchronous code, but leave the check
         * here as a reminder for whoever starts using AIO with the cache */
        abort();
    }
    return min_index;
}

static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    uint64_t offset, void **table, bool read_from_disk)
{
    BDRVQcowState *s = bs->opaque;
    int i;
    int ret;

    trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache,
                          offset, read_from_disk);

    /* Check if the table is already cached */
    for (i = 0; i < c->size; i++) {
        if (c->entries[i].offset == offset) {
            goto found;
        }
    }

    /* If not, write a table back and replace it */
    i = qcow2_cache_find_entry_to_replace(c);
    trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
                                        c == s->l2_table_cache, i);
    if (i < 0) {
        return i;
    }

    ret = qcow2_cache_entry_flush(bs, c, i);
    if (ret < 0) {
        return ret;
    }

    trace_qcow2_cache_get_read(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);
    c->entries[i].offset = 0;
    if (read_from_disk) {
        if (c == s->l2_table_cache) {
            BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
        }

        ret = bdrv_pread(bs->file, offset, c->entries[i].table,
                         s->cluster_size);
        if (ret < 0) {
            return ret;
        }
    }

    /* Give the table some hits for the start so that it won't be replaced
     * immediately. The number 32 is completely arbitrary. */
    c->entries[i].cache_hits = 32;
    c->entries[i].offset = offset;

    /* And return the right table */
found:
    c->entries[i].cache_hits++;
    c->entries[i].ref++;
    *table = c->entries[i].table;

    trace_qcow2_cache_get_done(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);

    return 0;
}

int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, true);
}

int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, false);
}

int qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == *table) {
            goto found;
        }
    }
    return -ENOENT;

found:
    c->entries[i].ref--;
    *table = NULL;

    assert(c->entries[i].ref >= 0);
    return 0;
}

void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == table) {
            goto found;
        }
    }
    abort();

found:
    c->entries[i].dirty = true;
}
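
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * sequence for updating one cached L2 table through the API above. The
 * helper name example_zero_first_l2_entry and its parameters are made up
 * for illustration; qcow2_cache_get/put/flush, qcow2_cache_entry_mark_dirty,
 * BDRVQcowState and the l2_table_cache field are the ones used in this file.
 *
 *   static int example_zero_first_l2_entry(BlockDriverState *bs,
 *                                          uint64_t l2_offset)
 *   {
 *       BDRVQcowState *s = bs->opaque;
 *       uint64_t *l2_table;
 *       int ret;
 *
 *       // Pin the table in the cache; it is read from disk if not cached
 *       ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
 *                             (void **) &l2_table);
 *       if (ret < 0) {
 *           return ret;
 *       }
 *
 *       // Modify the cached copy and mark the entry dirty so that a later
 *       // qcow2_cache_flush() writes it back
 *       l2_table[0] = cpu_to_be64(0);
 *       qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
 *
 *       // Drop the reference; the table stays cached until replaced
 *       ret = qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
 *       if (ret < 0) {
 *           return ret;
 *       }
 *
 *       return qcow2_cache_flush(bs, s->l2_table_cache);
 *   }
 */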