1 /* 2 * QEMU Enhanced Disk Format 3 * 4 * Copyright IBM, Corp. 2010 5 * 6 * Authors: 7 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> 8 * Anthony Liguori <aliguori@us.ibm.com> 9 * 10 * This work is licensed under the terms of the GNU LGPL, version 2 or later. 11 * See the COPYING.LIB file in the top-level directory. 12 * 13 */ 14 15 #ifndef BLOCK_QED_H 16 #define BLOCK_QED_H 17 18 #include "block/block_int.h" 19 #include "qemu/cutils.h" 20 21 /* The layout of a QED file is as follows: 22 * 23 * +--------+----------+----------+----------+-----+ 24 * | header | L1 table | cluster0 | cluster1 | ... | 25 * +--------+----------+----------+----------+-----+ 26 * 27 * There is a 2-level pagetable for cluster allocation: 28 * 29 * +----------+ 30 * | L1 table | 31 * +----------+ 32 * ,------' | '------. 33 * +----------+ | +----------+ 34 * | L2 table | ... | L2 table | 35 * +----------+ +----------+ 36 * ,------' | '------. 37 * +----------+ | +----------+ 38 * | Data | ... | Data | 39 * +----------+ +----------+ 40 * 41 * The L1 table is fixed size and always present. L2 tables are allocated on 42 * demand. The L1 table size determines the maximum possible image size; it 43 * can be influenced using the cluster_size and table_size values. 44 * 45 * All fields are little-endian on disk. 46 */ 47 #define QED_DEFAULT_CLUSTER_SIZE 65536 48 enum { 49 QED_MAGIC = 'Q' | 'E' << 8 | 'D' << 16 | '\0' << 24, 50 51 /* The image supports a backing file */ 52 QED_F_BACKING_FILE = 0x01, 53 54 /* The image needs a consistency check before use */ 55 QED_F_NEED_CHECK = 0x02, 56 57 /* The backing file format must not be probed, treat as raw image */ 58 QED_F_BACKING_FORMAT_NO_PROBE = 0x04, 59 60 /* Feature bits must be used when the on-disk format changes */ 61 QED_FEATURE_MASK = QED_F_BACKING_FILE | /* supported feature bits */ 62 QED_F_NEED_CHECK | 63 QED_F_BACKING_FORMAT_NO_PROBE, 64 QED_COMPAT_FEATURE_MASK = 0, /* supported compat feature bits */ 65 QED_AUTOCLEAR_FEATURE_MASK = 0, /* supported autoclear feature bits */ 66 67 /* Data is stored in groups of sectors called clusters. Cluster size must 68 * be large to avoid keeping too much metadata. I/O requests that have 69 * sub-cluster size will require read-modify-write. 70 */ 71 QED_MIN_CLUSTER_SIZE = 4 * 1024, /* in bytes */ 72 QED_MAX_CLUSTER_SIZE = 64 * 1024 * 1024, 73 74 /* Allocated clusters are tracked using a 2-level pagetable. Table size is 75 * a multiple of clusters so large maximum image sizes can be supported 76 * without jacking up the cluster size too much. 77 */ 78 QED_MIN_TABLE_SIZE = 1, /* in clusters */ 79 QED_MAX_TABLE_SIZE = 16, 80 QED_DEFAULT_TABLE_SIZE = 4, 81 82 /* Delay to flush and clean image after last allocating write completes */ 83 QED_NEED_CHECK_TIMEOUT = 5, /* in seconds */ 84 }; 85 86 typedef struct { 87 uint32_t magic; /* QED\0 */ 88 89 uint32_t cluster_size; /* in bytes */ 90 uint32_t table_size; /* for L1 and L2 tables, in clusters */ 91 uint32_t header_size; /* in clusters */ 92 93 uint64_t features; /* format feature bits */ 94 uint64_t compat_features; /* compatible feature bits */ 95 uint64_t autoclear_features; /* self-resetting feature bits */ 96 97 uint64_t l1_table_offset; /* in bytes */ 98 uint64_t image_size; /* total logical image size, in bytes */ 99 100 /* if (features & QED_F_BACKING_FILE) */ 101 uint32_t backing_filename_offset; /* in bytes from start of header */ 102 uint32_t backing_filename_size; /* in bytes */ 103 } QEMU_PACKED QEDHeader; 104 105 typedef struct { 106 uint64_t offsets[0]; /* in bytes */ 107 } QEDTable; 108 109 /* The L2 cache is a simple write-through cache for L2 structures */ 110 typedef struct CachedL2Table { 111 QEDTable *table; 112 uint64_t offset; /* offset=0 indicates an invalidate entry */ 113 QTAILQ_ENTRY(CachedL2Table) node; 114 int ref; 115 } CachedL2Table; 116 117 typedef struct { 118 QTAILQ_HEAD(, CachedL2Table) entries; 119 unsigned int n_entries; 120 } L2TableCache; 121 122 typedef struct QEDRequest { 123 CachedL2Table *l2_table; 124 } QEDRequest; 125 126 enum { 127 QED_AIOCB_WRITE = 0x0001, /* read or write? */ 128 QED_AIOCB_ZERO = 0x0002, /* zero write, used with QED_AIOCB_WRITE */ 129 }; 130 131 typedef struct QEDAIOCB { 132 BlockAIOCB common; 133 int bh_ret; /* final return status for completion bh */ 134 QSIMPLEQ_ENTRY(QEDAIOCB) next; /* next request */ 135 int flags; /* QED_AIOCB_* bits ORed together */ 136 uint64_t end_pos; /* request end on block device, in bytes */ 137 138 /* User scatter-gather list */ 139 QEMUIOVector *qiov; 140 size_t qiov_offset; /* byte count already processed */ 141 142 /* Current cluster scatter-gather list */ 143 QEMUIOVector cur_qiov; 144 QEMUIOVector *backing_qiov; 145 uint64_t cur_pos; /* position on block device, in bytes */ 146 uint64_t cur_cluster; /* cluster offset in image file */ 147 unsigned int cur_nclusters; /* number of clusters being accessed */ 148 int find_cluster_ret; /* used for L1/L2 update */ 149 150 QEDRequest request; 151 } QEDAIOCB; 152 153 typedef struct { 154 BlockDriverState *bs; /* device */ 155 uint64_t file_size; /* length of image file, in bytes */ 156 157 QEDHeader header; /* always cpu-endian */ 158 QEDTable *l1_table; 159 L2TableCache l2_cache; /* l2 table cache */ 160 uint32_t table_nelems; 161 uint32_t l1_shift; 162 uint32_t l2_shift; 163 uint32_t l2_mask; 164 165 /* Allocating write request queue */ 166 QSIMPLEQ_HEAD(, QEDAIOCB) allocating_write_reqs; 167 bool allocating_write_reqs_plugged; 168 169 /* Periodic flush and clear need check flag */ 170 QEMUTimer *need_check_timer; 171 } BDRVQEDState; 172 173 enum { 174 QED_CLUSTER_FOUND, /* cluster found */ 175 QED_CLUSTER_ZERO, /* zero cluster found */ 176 QED_CLUSTER_L2, /* cluster missing in L2 */ 177 QED_CLUSTER_L1, /* cluster missing in L1 */ 178 }; 179 180 /** 181 * qed_find_cluster() completion callback 182 * 183 * @opaque: User data for completion callback 184 * @ret: QED_CLUSTER_FOUND Success 185 * QED_CLUSTER_L2 Data cluster unallocated in L2 186 * QED_CLUSTER_L1 L2 unallocated in L1 187 * -errno POSIX error occurred 188 * @offset: Data cluster offset 189 * @len: Contiguous bytes starting from cluster offset 190 * 191 * This function is invoked when qed_find_cluster() completes. 192 * 193 * On success ret is QED_CLUSTER_FOUND and offset/len are a contiguous range 194 * in the image file. 195 * 196 * On failure ret is QED_CLUSTER_L2 or QED_CLUSTER_L1 for missing L2 or L1 197 * table offset, respectively. len is number of contiguous unallocated bytes. 198 */ 199 typedef void QEDFindClusterFunc(void *opaque, int ret, uint64_t offset, size_t len); 200 201 void qed_acquire(BDRVQEDState *s); 202 void qed_release(BDRVQEDState *s); 203 204 /** 205 * Generic callback for chaining async callbacks 206 */ 207 typedef struct { 208 BlockCompletionFunc *cb; 209 void *opaque; 210 } GenericCB; 211 212 void *gencb_alloc(size_t len, BlockCompletionFunc *cb, void *opaque); 213 void gencb_complete(void *opaque, int ret); 214 215 /** 216 * Header functions 217 */ 218 int qed_write_header_sync(BDRVQEDState *s); 219 220 /** 221 * L2 cache functions 222 */ 223 void qed_init_l2_cache(L2TableCache *l2_cache); 224 void qed_free_l2_cache(L2TableCache *l2_cache); 225 CachedL2Table *qed_alloc_l2_cache_entry(L2TableCache *l2_cache); 226 void qed_unref_l2_cache_entry(CachedL2Table *entry); 227 CachedL2Table *qed_find_l2_cache_entry(L2TableCache *l2_cache, uint64_t offset); 228 void qed_commit_l2_cache_entry(L2TableCache *l2_cache, CachedL2Table *l2_table); 229 230 /** 231 * Table I/O functions 232 */ 233 int qed_read_l1_table_sync(BDRVQEDState *s); 234 void qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n, 235 BlockCompletionFunc *cb, void *opaque); 236 int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index, 237 unsigned int n); 238 int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, 239 uint64_t offset); 240 void qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset, 241 BlockCompletionFunc *cb, void *opaque); 242 void qed_write_l2_table(BDRVQEDState *s, QEDRequest *request, 243 unsigned int index, unsigned int n, bool flush, 244 BlockCompletionFunc *cb, void *opaque); 245 int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request, 246 unsigned int index, unsigned int n, bool flush); 247 248 /** 249 * Cluster functions 250 */ 251 void qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos, 252 size_t len, QEDFindClusterFunc *cb, void *opaque); 253 254 /** 255 * Consistency check 256 */ 257 int qed_check(BDRVQEDState *s, BdrvCheckResult *result, bool fix); 258 259 QEDTable *qed_alloc_table(BDRVQEDState *s); 260 261 /** 262 * Round down to the start of a cluster 263 */ 264 static inline uint64_t qed_start_of_cluster(BDRVQEDState *s, uint64_t offset) 265 { 266 return offset & ~(uint64_t)(s->header.cluster_size - 1); 267 } 268 269 static inline uint64_t qed_offset_into_cluster(BDRVQEDState *s, uint64_t offset) 270 { 271 return offset & (s->header.cluster_size - 1); 272 } 273 274 static inline uint64_t qed_bytes_to_clusters(BDRVQEDState *s, uint64_t bytes) 275 { 276 return qed_start_of_cluster(s, bytes + (s->header.cluster_size - 1)) / 277 (s->header.cluster_size - 1); 278 } 279 280 static inline unsigned int qed_l1_index(BDRVQEDState *s, uint64_t pos) 281 { 282 return pos >> s->l1_shift; 283 } 284 285 static inline unsigned int qed_l2_index(BDRVQEDState *s, uint64_t pos) 286 { 287 return (pos >> s->l2_shift) & s->l2_mask; 288 } 289 290 /** 291 * Test if a cluster offset is valid 292 */ 293 static inline bool qed_check_cluster_offset(BDRVQEDState *s, uint64_t offset) 294 { 295 uint64_t header_size = (uint64_t)s->header.header_size * 296 s->header.cluster_size; 297 298 if (offset & (s->header.cluster_size - 1)) { 299 return false; 300 } 301 return offset >= header_size && offset < s->file_size; 302 } 303 304 /** 305 * Test if a table offset is valid 306 */ 307 static inline bool qed_check_table_offset(BDRVQEDState *s, uint64_t offset) 308 { 309 uint64_t end_offset = offset + (s->header.table_size - 1) * 310 s->header.cluster_size; 311 312 /* Overflow check */ 313 if (end_offset <= offset) { 314 return false; 315 } 316 317 return qed_check_cluster_offset(s, offset) && 318 qed_check_cluster_offset(s, end_offset); 319 } 320 321 static inline bool qed_offset_is_cluster_aligned(BDRVQEDState *s, 322 uint64_t offset) 323 { 324 if (qed_offset_into_cluster(s, offset)) { 325 return false; 326 } 327 return true; 328 } 329 330 static inline bool qed_offset_is_unalloc_cluster(uint64_t offset) 331 { 332 if (offset == 0) { 333 return true; 334 } 335 return false; 336 } 337 338 static inline bool qed_offset_is_zero_cluster(uint64_t offset) 339 { 340 if (offset == 1) { 341 return true; 342 } 343 return false; 344 } 345 346 #endif /* BLOCK_QED_H */ 347