/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "qed.h"
#include "qemu/bswap.h"

static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
{
    QEMUIOVector qiov;
    int noffsets;
    int i, ret;

    struct iovec iov = {
        .iov_base = table->offsets,
        .iov_len = s->header.cluster_size * s->header.table_size,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    trace_qed_read_table(s, offset, table);

    ret = bdrv_preadv(s->bs->file, offset, &qiov);
    if (ret < 0) {
        goto out;
    }

    /* Byteswap offsets */
    qed_acquire(s);
    noffsets = qiov.size / sizeof(uint64_t);
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }
    qed_release(s);

    ret = 0;
out:
    /* Completion */
    trace_qed_read_table_cb(s, table, ret);
    return ret;
}

/**
 * Write out an updated part or all of a table
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table
 * @index:      Index of first element
 * @n:          Number of elements
 * @flush:      Whether or not to sync to disk
 */
static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                           unsigned int index, unsigned int n, bool flush)
{
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    QEDTable *new_table;
    struct iovec iov;
    QEMUIOVector qiov;
    size_t len_bytes;
    int ret;

    trace_qed_write_table(s, offset, table, index, n);

    /* Calculate indices of the first and one after last elements */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);

    new_table = qemu_blockalign(s->bs, len_bytes);
    iov = (struct iovec) {
        .iov_base = new_table->offsets,
        .iov_len = len_bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    /* Byteswap table */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        new_table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

    ret = bdrv_pwritev(s->bs->file, offset, &qiov);
    trace_qed_write_table_cb(s, table, flush, ret);
    if (ret < 0) {
        goto out;
    }

    if (flush) {
        qed_acquire(s);
        ret = bdrv_flush(s->bs);
        qed_release(s);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    qemu_vfree(new_table);
    return ret;
}

int qed_read_l1_table_sync(BDRVQEDState *s)
{
    return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
}

int qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
    return qed_write_table(s, s->header.l1_table_offset,
                           s->l1_table, index, n, false);
}

int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                            unsigned int n)
{
    return qed_write_l1_table(s, index, n);
}

int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    int ret;
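
    /* Drop any reference this request still holds to a previously loaded
     * L2 table before consulting the cache for the table at @offset.
     */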
    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        return 0;
    }

    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    ret = qed_read_table(s, offset, request->l2_table->table);

    qed_acquire(s);
    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(request->l2_table);
        request->l2_table = NULL;
    } else {
        request->l2_table->offset = offset;

        qed_commit_l2_cache_entry(&s->l2_cache, request->l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
        assert(request->l2_table != NULL);
    }
    qed_release(s);

    return ret;
}

int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                           uint64_t offset)
{
    return qed_read_l2_table(s, request, offset);
}

int qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                       unsigned int index, unsigned int n, bool flush)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
    return qed_write_table(s, request->l2_table->offset,
                           request->l2_table->table, index, n, flush);
}

int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                            unsigned int index, unsigned int n, bool flush)
{
    return qed_write_l2_table(s, request, index, n, flush);
}