/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "qed.h"
#include "qemu/bswap.h"
#include "qemu/memalign.h"

/* Called with table_lock held. */
static int coroutine_fn qed_read_table(BDRVQEDState *s, uint64_t offset,
                                       QEDTable *table)
{
    unsigned int bytes = s->header.cluster_size * s->header.table_size;
    int noffsets;
    int i, ret;

    trace_qed_read_table(s, offset, table);

    /* Drop table_lock during the blocking read so other coroutines are not
     * held up in the meantime.
     */
    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_co_pread(s->bs->file, offset, bytes, table->offsets, 0);
    qemu_co_mutex_lock(&s->table_lock);
    if (ret < 0) {
        goto out;
    }

    /* Byteswap offsets */
    noffsets = bytes / sizeof(uint64_t);
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }

    ret = 0;
out:
    /* Completion */
    trace_qed_read_table_cb(s, table, ret);
    return ret;
}

/**
 * Write out an updated part or all of a table
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table
 * @index:      Index of first element
 * @n:          Number of elements
 * @flush:      Whether or not to sync to disk
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_write_table(BDRVQEDState *s, uint64_t offset,
                                        QEDTable *table, unsigned int index,
                                        unsigned int n, bool flush)
{
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    QEDTable *new_table;
    size_t len_bytes;
    int ret;

    trace_qed_write_table(s, offset, table, index, n);

    /* Calculate indices of the first and one after last elements, rounded
     * out to sector boundaries so the write below stays sector-aligned.
     */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);

    new_table = qemu_blockalign(s->bs, len_bytes);

    /* Byteswap table */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        new_table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_co_pwrite(s->bs->file, offset, len_bytes, new_table->offsets, 0);
    qemu_co_mutex_lock(&s->table_lock);
    trace_qed_write_table_cb(s, table, flush, ret);
    if (ret < 0) {
        goto out;
    }

    if (flush) {
        ret = bdrv_co_flush(s->bs);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    qemu_vfree(new_table);
    return ret;
}

int coroutine_fn qed_read_l1_table_sync(BDRVQEDState *s)
{
    return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
}

/* Called with table_lock held. */
int coroutine_fn qed_write_l1_table(BDRVQEDState *s, unsigned int index,
                                    unsigned int n)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
    return qed_write_table(s, s->header.l1_table_offset,
                           s->l1_table, index, n, false);
}

int coroutine_fn qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                                         unsigned int n)
{
    return qed_write_l1_table(s, index, n);
}
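/*
 * Worked example (illustrative, not part of the driver): how
 * qed_write_table() widens a partial update to whole sectors.  With the
 * usual 512-byte BDRV_SECTOR_SIZE and 8-byte table entries,
 * sector_mask = 512 / 8 - 1 = 63.  Updating n = 3 entries at index = 100
 * gives:
 *
 *   start = 100 & ~63            = 64
 *   end   = (100 + 3 + 63) & ~63 = 128
 *
 * so entries [64, 128) are rewritten: exactly one 512-byte sector, aligned
 * in both offset and length.
 */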
/* Called with table_lock held. */
int coroutine_fn qed_read_l2_table(BDRVQEDState *s, QEDRequest *request,
                                   uint64_t offset)
{
    int ret;

    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        return 0;
    }

    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    ret = qed_read_table(s, offset, request->l2_table->table);

    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(request->l2_table);
        request->l2_table = NULL;
    } else {
        request->l2_table->offset = offset;

        qed_commit_l2_cache_entry(&s->l2_cache, request->l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
        assert(request->l2_table != NULL);
    }

    return ret;
}

int coroutine_fn qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                                        uint64_t offset)
{
    return qed_read_l2_table(s, request, offset);
}

/* Called with table_lock held. */
int coroutine_fn qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                                    unsigned int index, unsigned int n,
                                    bool flush)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
    return qed_write_table(s, request->l2_table->offset,
                           request->l2_table->table, index, n, flush);
}

int coroutine_fn qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                                         unsigned int index, unsigned int n,
                                         bool flush)
{
    return qed_write_l2_table(s, request, index, n, flush);
}
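/*
 * Usage sketch (illustration only; l2_offset, l2_index and cluster_offset
 * are hypothetical locals): a typical metadata update loads the relevant
 * L2 table through the cache, patches one entry in memory, and writes just
 * that entry back, all in coroutine context with table_lock held:
 *
 *     ret = qed_read_l2_table(s, request, l2_offset);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     request->l2_table->table->offsets[l2_index] = cluster_offset;
 *     ret = qed_write_l2_table(s, request, l2_index, 1, true);
 *
 * Offsets are kept in CPU byte order in memory; qed_write_table() converts
 * to little-endian and rounds the write out to sector granularity.
 */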