/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "block/block-io.h"
#include "trace.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "qed.h"
#include "qemu/bswap.h"
#include "qemu/memalign.h"

/**
 * Read a table from the image file into memory
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table to fill in
 *
 * Table entries are stored little-endian on disk and are byteswapped to
 * host endianness on load.
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
{
    unsigned int bytes = s->header.cluster_size * s->header.table_size;

    int noffsets;
    int i, ret;

    trace_qed_read_table(s, offset, table);

    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_co_pread(s->bs->file, offset, bytes, table->offsets, 0);
    qemu_co_mutex_lock(&s->table_lock);
    if (ret < 0) {
        goto out;
    }

    /* Byteswap offsets */
    noffsets = bytes / sizeof(uint64_t);
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }

    ret = 0;
out:
    /* Completion */
    trace_qed_read_table_cb(s, table, ret);
    return ret;
}

/**
 * Write out an updated part or all of a table
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table
 * @index:      Index of first element
 * @n:          Number of elements
 * @flush:      Whether or not to sync to disk
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                unsigned int index, unsigned int n, bool flush)
{
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    QEDTable *new_table;
    size_t len_bytes;
    int ret;

    trace_qed_write_table(s, offset, table, index, n);

    /* Calculate indices of the first and one after last elements */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);

    new_table = qemu_blockalign(s->bs, len_bytes);

    /* Byteswap table */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        new_table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_co_pwrite(s->bs->file, offset, len_bytes, new_table->offsets, 0);
    qemu_co_mutex_lock(&s->table_lock);
    trace_qed_write_table_cb(s, table, flush, ret);
    if (ret < 0) {
        goto out;
    }

    if (flush) {
        ret = bdrv_co_flush(s->bs);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    qemu_vfree(new_table);
    return ret;
}
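
/*
 * A worked example of the partial-write alignment in qed_write_table()
 * above (illustrative only; assumes the usual 512-byte BDRV_SECTOR_SIZE
 * and 8-byte table entries, so sector_mask == 63):
 *
 *   index = 100, n = 3
 *   start     = 100 & ~63            = 64   (first entry of the sector)
 *   end       = (100 + 3 + 63) & ~63 = 128  (one past the last sector)
 *   len_bytes = (128 - 64) * 8       = 512  (exactly one sector)
 *
 * Updating entries 100-102 therefore rewrites the whole 512-byte sector
 * containing them, keeping the bdrv_co_pwrite() request sector-aligned.
 */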

int coroutine_fn qed_read_l1_table_sync(BDRVQEDState *s)
{
    return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
}

/* Called with table_lock held. */
int coroutine_fn qed_write_l1_table(BDRVQEDState *s, unsigned int index,
                                    unsigned int n)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
    return qed_write_table(s, s->header.l1_table_offset,
                           s->l1_table, index, n, false);
}

int coroutine_fn qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                                         unsigned int n)
{
    return qed_write_l1_table(s, index, n);
}

/* Called with table_lock held. */
int coroutine_fn qed_read_l2_table(BDRVQEDState *s, QEDRequest *request,
                                   uint64_t offset)
{
    int ret;

    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        return 0;
    }

    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    ret = qed_read_table(s, offset, request->l2_table->table);

    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(request->l2_table);
        request->l2_table = NULL;
    } else {
        request->l2_table->offset = offset;

        qed_commit_l2_cache_entry(&s->l2_cache, request->l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
        assert(request->l2_table != NULL);
    }

    return ret;
}

int coroutine_fn qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                                        uint64_t offset)
{
    return qed_read_l2_table(s, request, offset);
}

/* Called with table_lock held. */
int coroutine_fn qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                                    unsigned int index, unsigned int n,
                                    bool flush)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
    return qed_write_table(s, request->l2_table->offset,
                           request->l2_table->table, index, n, flush);
}

int coroutine_fn qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                                         unsigned int index, unsigned int n,
                                         bool flush)
{
    return qed_write_l2_table(s, request, index, n, flush);
}
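
/*
 * A rough sketch of how a caller typically combines these helpers for an
 * L2 update: load the table through the cache, patch the in-memory entry
 * (native endianness; qed_write_table() byteswaps on write), then write
 * back only the touched range. Illustrative only; the variable names
 * (l2_offset, idx, cluster_offset) are placeholders and locking, reference
 * counting, and error handling are elided:
 *
 *   ret = qed_read_l2_table_sync(s, &request, l2_offset);
 *   if (ret == 0) {
 *       request.l2_table->table->offsets[idx] = cluster_offset;
 *       ret = qed_write_l2_table_sync(s, &request, idx, 1, true);
 *   }
 */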