/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#ifndef BLOCK_QED_H
#define BLOCK_QED_H

#include "block/block_int.h"
#include "qemu/cutils.h"

/* The layout of a QED file is as follows:
 *
 * +--------+----------+----------+----------+-----+
 * | header | L1 table | cluster0 | cluster1 | ... |
 * +--------+----------+----------+----------+-----+
 *
 * There is a 2-level pagetable for cluster allocation:
 *
 *                     +----------+
 *                     | L1 table |
 *                     +----------+
 *                ,------'  |  '------.
 *           +----------+   |    +----------+
 *           | L2 table |  ...   | L2 table |
 *           +----------+        +----------+
 *               ,------'  |  '------.
 *          +----------+   |    +----------+
 *          |   Data   |  ...   |   Data   |
 *          +----------+        +----------+
 *
 * The L1 table is fixed size and always present.  L2 tables are allocated on
 * demand.  The L1 table size determines the maximum possible image size; it
 * can be influenced using the cluster_size and table_size values.
 *
 * All fields are little-endian on disk.
 */
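/* Rough sizing example (illustrative, not a format guarantee): each table
 * occupies table_size clusters and holds table_size * cluster_size /
 * sizeof(uint64_t) entries.  With the defaults below (64 KiB clusters,
 * 4-cluster tables) that is 32768 entries per table, giving a maximum
 * image size of 32768 L1 entries * 32768 L2 entries * 64 KiB = 64 TiB.
 */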
#define QED_DEFAULT_CLUSTER_SIZE 65536
enum {
    QED_MAGIC = 'Q' | 'E' << 8 | 'D' << 16 | '\0' << 24,

    /* The image supports a backing file */
    QED_F_BACKING_FILE = 0x01,

    /* The image needs a consistency check before use */
    QED_F_NEED_CHECK = 0x02,

    /* The backing file format must not be probed, treat as raw image */
    QED_F_BACKING_FORMAT_NO_PROBE = 0x04,

    /* Feature bits must be used when the on-disk format changes */
    QED_FEATURE_MASK = QED_F_BACKING_FILE |     /* supported feature bits */
                       QED_F_NEED_CHECK |
                       QED_F_BACKING_FORMAT_NO_PROBE,
    QED_COMPAT_FEATURE_MASK = 0,                /* supported compat feature bits */
    QED_AUTOCLEAR_FEATURE_MASK = 0,             /* supported autoclear feature bits */

    /* Data is stored in groups of sectors called clusters.  Cluster size must
     * be large to avoid keeping too much metadata.  I/O requests that have
     * sub-cluster size will require read-modify-write.
     */
    QED_MIN_CLUSTER_SIZE = 4 * 1024, /* in bytes */
    QED_MAX_CLUSTER_SIZE = 64 * 1024 * 1024,

    /* Allocated clusters are tracked using a 2-level pagetable.  Table size is
     * a multiple of clusters so large maximum image sizes can be supported
     * without jacking up the cluster size too much.
     */
    QED_MIN_TABLE_SIZE = 1,        /* in clusters */
    QED_MAX_TABLE_SIZE = 16,
    QED_DEFAULT_TABLE_SIZE = 4,

    /* Delay to flush and clean image after last allocating write completes */
    QED_NEED_CHECK_TIMEOUT = 5,    /* in seconds */
};
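/* A minimal sketch (not the driver's own helper) of the kind of check the
 * constants above imply: the format wants a power-of-two cluster size within
 * the min/max bounds.
 *
 *     static bool cluster_size_in_range(uint32_t cluster_size)
 *     {
 *         return is_power_of_2(cluster_size) &&
 *                cluster_size >= QED_MIN_CLUSTER_SIZE &&
 *                cluster_size <= QED_MAX_CLUSTER_SIZE;
 *     }
 *
 * (cluster_size_in_range is a hypothetical name; the real validation lives in
 * the QED driver code.)
 */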

typedef struct {
    uint32_t magic;                 /* QED\0 */

    uint32_t cluster_size;          /* in bytes */
    uint32_t table_size;            /* for L1 and L2 tables, in clusters */
    uint32_t header_size;           /* in clusters */

    uint64_t features;              /* format feature bits */
    uint64_t compat_features;       /* compatible feature bits */
    uint64_t autoclear_features;    /* self-resetting feature bits */

    uint64_t l1_table_offset;       /* in bytes */
    uint64_t image_size;            /* total logical image size, in bytes */

    /* if (features & QED_F_BACKING_FILE) */
    uint32_t backing_filename_offset; /* in bytes from start of header */
    uint32_t backing_filename_size;   /* in bytes */
} QEMU_PACKED QEDHeader;
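/* The on-disk header is little-endian while BDRVQEDState keeps a cpu-endian
 * copy.  A conversion sketch, assuming the usual qemu/bswap.h helpers:
 *
 *     cpu->magic        = le32_to_cpu(le->magic);
 *     cpu->cluster_size = le32_to_cpu(le->cluster_size);
 *     ...
 *     cpu->image_size   = le64_to_cpu(le->image_size);
 *
 * (Only a sketch; the driver has its own header byteswap routines.)
 */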

typedef struct {
    uint64_t offsets[0];            /* in bytes */
} QEDTable;

/* The L2 cache is a simple write-through cache for L2 structures */
typedef struct CachedL2Table {
    QEDTable *table;
    uint64_t offset;                /* offset=0 indicates an invalid (unused) entry */
    QTAILQ_ENTRY(CachedL2Table) node;
    int ref;
} CachedL2Table;

typedef struct {
    QTAILQ_HEAD(, CachedL2Table) entries;
    unsigned int n_entries;
} L2TableCache;

typedef struct QEDRequest {
    CachedL2Table *l2_table;
} QEDRequest;

enum {
    QED_AIOCB_WRITE = 0x0001,       /* read or write? */
    QED_AIOCB_ZERO  = 0x0002,       /* zero write, used with QED_AIOCB_WRITE */
};

typedef struct QEDAIOCB {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(QEDAIOCB) next;  /* next request */
    int flags;                      /* QED_AIOCB_* bits ORed together */
    uint64_t end_pos;               /* request end on block device, in bytes */

    /* User scatter-gather list */
    QEMUIOVector *qiov;
    size_t qiov_offset;             /* byte count already processed */

    /* Current cluster scatter-gather list */
    QEMUIOVector cur_qiov;
    uint64_t cur_pos;               /* position on block device, in bytes */
    uint64_t cur_cluster;           /* cluster offset in image file */
    unsigned int cur_nclusters;     /* number of clusters being accessed */
    int find_cluster_ret;           /* used for L1/L2 update */

    QEDRequest request;
} QEDAIOCB;

typedef struct {
    BlockDriverState *bs;           /* device */

    /* Written only by an allocating write or the timer handler (the latter
     * while allocating reqs are plugged).
     */
    QEDHeader header;               /* always cpu-endian */

    /* Protected by table_lock.  */
    CoMutex table_lock;
    QEDTable *l1_table;
    L2TableCache l2_cache;          /* l2 table cache */
    uint32_t table_nelems;
    uint32_t l1_shift;
    uint32_t l2_shift;
    uint32_t l2_mask;
    uint64_t file_size;             /* length of image file, in bytes */

    /* Allocating write request queue */
    QEDAIOCB *allocating_acb;
    CoQueue allocating_write_reqs;
    bool allocating_write_reqs_plugged;

    /* Periodic flush and clear need check flag */
    QEMUTimer *need_check_timer;
} BDRVQEDState;
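/* Summary of how these fields interact (see qed.c for the authoritative
 * flow): an allocating write first sets QED_F_NEED_CHECK in the header.
 * Once the last allocating write completes, need_check_timer fires after
 * QED_NEED_CHECK_TIMEOUT seconds, flushes the image and clears the flag.
 * An image opened with the flag still set is checked for consistency
 * before use.
 */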

enum {
    QED_CLUSTER_FOUND,         /* cluster found */
    QED_CLUSTER_ZERO,          /* zero cluster found */
    QED_CLUSTER_L2,            /* cluster missing in L2 */
    QED_CLUSTER_L1,            /* cluster missing in L1 */
};

/**
 * Header functions
 */
int GRAPH_RDLOCK qed_write_header_sync(BDRVQEDState *s);

/**
 * L2 cache functions
 */
void qed_init_l2_cache(L2TableCache *l2_cache);
void qed_free_l2_cache(L2TableCache *l2_cache);
CachedL2Table *qed_alloc_l2_cache_entry(L2TableCache *l2_cache);
void qed_unref_l2_cache_entry(CachedL2Table *entry);
CachedL2Table *qed_find_l2_cache_entry(L2TableCache *l2_cache, uint64_t offset);
void qed_commit_l2_cache_entry(L2TableCache *l2_cache, CachedL2Table *l2_table);
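/* A rough usage sketch (illustrative; the driver's real read path adds error
 * handling and a re-lookup after commit):
 *
 *     CachedL2Table *l2 = qed_find_l2_cache_entry(&s->l2_cache, offset);
 *     if (!l2) {
 *         l2 = qed_alloc_l2_cache_entry(&s->l2_cache);
 *         l2->table = qed_alloc_table(s);
 *         ... read the L2 table at 'offset' from the image file ...
 *         l2->offset = offset;
 *         qed_commit_l2_cache_entry(&s->l2_cache, l2);
 *     }
 *     ... use l2->table->offsets[...] ...
 *     qed_unref_l2_cache_entry(l2);
 *
 * The cache is write-through: updated tables are written to the image file
 * before the cached copy is relied upon.
 */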

/**
 * Table I/O functions
 */
int coroutine_fn GRAPH_RDLOCK qed_read_l1_table_sync(BDRVQEDState *s);

int coroutine_fn GRAPH_RDLOCK
qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n);

int coroutine_fn GRAPH_RDLOCK
qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index, unsigned int n);

int coroutine_fn GRAPH_RDLOCK
qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset);

int coroutine_fn GRAPH_RDLOCK
qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset);

int coroutine_fn GRAPH_RDLOCK
qed_write_l2_table(BDRVQEDState *s, QEDRequest *request, unsigned int index,
                   unsigned int n, bool flush);

int coroutine_fn GRAPH_RDLOCK
qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                        unsigned int index, unsigned int n, bool flush);

/**
 * Cluster functions
 */
int coroutine_fn GRAPH_RDLOCK
qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos,
                 size_t *len, uint64_t *img_offset);
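/* A sketch of how a caller might act on the result (illustrative only; the
 * driver's actual read/write paths live in qed.c):
 *
 *     ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
 *     switch (ret) {
 *     case QED_CLUSTER_FOUND:  read/write 'len' bytes at image 'offset'
 *     case QED_CLUSTER_ZERO:   the range reads as zeroes
 *     case QED_CLUSTER_L2:
 *     case QED_CLUSTER_L1:     unallocated; fall back to the backing file
 *                              (reads) or allocate clusters (writes)
 *     default (negative):      error
 *     }
 */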

/**
 * Consistency check
 */
int coroutine_fn GRAPH_RDLOCK
qed_check(BDRVQEDState *s, BdrvCheckResult *result, bool fix);

QEDTable *qed_alloc_table(BDRVQEDState *s);

/**
 * Round down to the start of a cluster
 */
static inline uint64_t qed_start_of_cluster(BDRVQEDState *s, uint64_t offset)
{
    return offset & ~(uint64_t)(s->header.cluster_size - 1);
}
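/* Note: this mask trick, and the index helpers below, rely on cluster_size
 * being a power of two (which the format requires).
 */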

static inline uint64_t qed_offset_into_cluster(BDRVQEDState *s, uint64_t offset)
{
    return offset & (s->header.cluster_size - 1);
}

static inline uint64_t qed_bytes_to_clusters(BDRVQEDState *s, uint64_t bytes)
{
    return qed_start_of_cluster(s, bytes + (s->header.cluster_size - 1)) /
           (s->header.cluster_size);
}
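/* For example, with 64 KiB clusters: qed_bytes_to_clusters(s, 1) == 1 and
 * qed_bytes_to_clusters(s, 65537) == 2 (sizes round up to whole clusters).
 */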

static inline unsigned int qed_l1_index(BDRVQEDState *s, uint64_t pos)
{
    return pos >> s->l1_shift;
}

static inline unsigned int qed_l2_index(BDRVQEDState *s, uint64_t pos)
{
    return (pos >> s->l2_shift) & s->l2_mask;
}
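/* These helpers assume the shift/mask fields were derived from the header
 * when the image was opened, along these lines (a sketch; see the driver's
 * open code for the real thing):
 *
 *     s->table_nelems = (s->header.cluster_size * s->header.table_size) /
 *                       sizeof(uint64_t);
 *     s->l2_shift = ctz32(s->header.cluster_size);
 *     s->l2_mask  = s->table_nelems - 1;
 *     s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
 */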

/**
 * Test if a cluster offset is valid
 */
static inline bool qed_check_cluster_offset(BDRVQEDState *s, uint64_t offset)
{
    uint64_t header_size = (uint64_t)s->header.header_size *
                           s->header.cluster_size;

    if (offset & (s->header.cluster_size - 1)) {
        return false;
    }
    return offset >= header_size && offset < s->file_size;
}

/**
 * Test if a table offset is valid
 */
static inline bool qed_check_table_offset(BDRVQEDState *s, uint64_t offset)
{
    uint64_t end_offset = offset + (s->header.table_size - 1) *
                          s->header.cluster_size;

    /* Overflow check */
    if (end_offset <= offset) {
        return false;
    }

    return qed_check_cluster_offset(s, offset) &&
           qed_check_cluster_offset(s, end_offset);
}
29975411d23SStefan Hajnoczi
qed_offset_is_cluster_aligned(BDRVQEDState * s,uint64_t offset)30021df65b6SAnthony Liguori static inline bool qed_offset_is_cluster_aligned(BDRVQEDState *s,
30121df65b6SAnthony Liguori uint64_t offset)
30221df65b6SAnthony Liguori {
30321df65b6SAnthony Liguori if (qed_offset_into_cluster(s, offset)) {
30421df65b6SAnthony Liguori return false;
30521df65b6SAnthony Liguori }
30621df65b6SAnthony Liguori return true;
30721df65b6SAnthony Liguori }
30821df65b6SAnthony Liguori
qed_offset_is_unalloc_cluster(uint64_t offset)30921df65b6SAnthony Liguori static inline bool qed_offset_is_unalloc_cluster(uint64_t offset)
31021df65b6SAnthony Liguori {
31121df65b6SAnthony Liguori if (offset == 0) {
31221df65b6SAnthony Liguori return true;
31321df65b6SAnthony Liguori }
31421df65b6SAnthony Liguori return false;
31521df65b6SAnthony Liguori }

static inline bool qed_offset_is_zero_cluster(uint64_t offset)
{
    if (offset == 1) {
        return true;
    }
    return false;
}
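/* Taken together, the helpers above encode the special L2 entry values:
 * offset 0 means the cluster is unallocated, offset 1 means it reads as
 * zeroes, and any other cluster-aligned value is the file offset of a data
 * cluster.
 */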

#endif /* BLOCK_QED_H */