--- a/qed.c    (a101341aa07237fa85907b1dcafd97add47a3875)
+++ b/qed.c    (d6daddcdeb2c0c7d443cb039e798a1671dafdd0d)
 /*
  * QEMU Enhanced Disk Format
  *
  * Copyright IBM, Corp. 2010
  *
  * Authors:
  *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
  *  Anthony Liguori <aliguori@us.ibm.com>
--- 1104 unchanged lines hidden ---
 /**
  * Write new data cluster
  *
  * @acb: Write request
  * @len: Length in bytes
  *
  * This path is taken when writing to previously unallocated clusters.
  */
-static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
+static int qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
 {
     BDRVQEDState *s = acb_to_s(acb);
     int ret;

     /* Cancel timer when the first allocating request comes in */
     if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
         qed_cancel_need_check_timer(s);
     }

     /* Freeze this request if another allocating write is in progress */
     if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
         QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
     }
     if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
         s->allocating_write_reqs_plugged) {
-        return; /* wait for existing request to finish */
+        return -EINPROGRESS; /* wait for existing request to finish */
     }

     acb->cur_nclusters = qed_bytes_to_clusters(s,
             qed_offset_into_cluster(s, acb->cur_pos) + len);
     qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

     if (acb->flags & QED_AIOCB_ZERO) {
         /* Skip ahead if the clusters are already zero */
         if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
-            qed_aio_start_io(acb);
-            return;
+            return 0;
         }
     } else {
         acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
     }

     if (qed_should_set_need_check(s)) {
         s->header.features |= QED_F_NEED_CHECK;
         ret = qed_write_header(s);
         if (ret < 0) {
-            qed_aio_complete(acb, ret);
-            return;
+            return ret;
         }
     }

     if (acb->flags & QED_AIOCB_ZERO) {
         ret = qed_aio_write_l2_update(acb, 1);
     } else {
         ret = qed_aio_write_cow(acb);
     }
     if (ret < 0) {
-        qed_aio_complete(acb, ret);
-        return;
+        return ret;
     }
-    qed_aio_next_io(acb, 0);
+    return 0;
 }

 /**
  * Write data cluster in place
  *
  * @acb: Write request
  * @offset: Cluster offset in bytes
  * @len: Length in bytes
  *
  * This path is taken when writing to already allocated clusters.
  */
-static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
+static int qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
 {
-    int ret;
-
     /* Allocate buffer for zero writes */
     if (acb->flags & QED_AIOCB_ZERO) {
         struct iovec *iov = acb->qiov->iov;

         if (!iov->iov_base) {
             iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
             if (iov->iov_base == NULL) {
-                qed_aio_complete(acb, -ENOMEM);
-                return;
+                return -ENOMEM;
             }
             memset(iov->iov_base, 0, iov->iov_len);
         }
     }

     /* Calculate the I/O vector */
     acb->cur_cluster = offset;
     qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

     /* Do the actual write */
-    ret = qed_aio_write_main(acb);
-    if (ret < 0) {
-        qed_aio_complete(acb, ret);
-        return;
-    }
-    qed_aio_next_io(acb, 0);
+    return qed_aio_write_main(acb);
 }

 /**
  * Write data cluster
  *
  * @opaque: Write request
  * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
  *       or -errno
--- 6 unchanged lines hidden ---
     QEDAIOCB *acb = opaque;

     trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

     acb->find_cluster_ret = ret;

     switch (ret) {
     case QED_CLUSTER_FOUND:
-        qed_aio_write_inplace(acb, offset, len);
+        ret = qed_aio_write_inplace(acb, offset, len);
         break;

     case QED_CLUSTER_L2:
     case QED_CLUSTER_L1:
     case QED_CLUSTER_ZERO:
-        qed_aio_write_alloc(acb, len);
+        ret = qed_aio_write_alloc(acb, len);
         break;

     default:
-        qed_aio_complete(acb, ret);
+        assert(ret < 0);
         break;
     }
+
+    if (ret < 0) {
+        if (ret != -EINPROGRESS) {
+            qed_aio_complete(acb, ret);
+        }
+        return;
+    }
+    qed_aio_next_io(acb, 0);
 }

 /**
  * Read data cluster
  *
  * @opaque: Read request
  * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
  *       or -errno
--- 400 unchanged lines hidden ---
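
Note: the diff above converts qed_aio_write_alloc() and qed_aio_write_inplace() from completing or advancing the request themselves (via qed_aio_complete() / qed_aio_next_io()) into plain int-returning functions, and concentrates that handling in one place in qed_aio_write_data(), where -EINPROGRESS means "parked behind another allocating write; neither complete nor advance it". The stand-alone sketch below illustrates that calling convention only; every name in it (struct request, write_step(), dispatch(), ...) is hypothetical and not part of qed.c.

/* Sketch of the return-code convention, not QEMU code. */
#include <errno.h>
#include <stdio.h>

struct request {
    int queued;   /* stand-in for "another allocating write is in progress" */
};

/* Callee reports status instead of completing the request itself:
 * 0 on success, -EINPROGRESS if the request was queued and will be
 * resumed later, or a negative errno on failure. */
static int write_step(struct request *req)
{
    if (req->queued) {
        return -EINPROGRESS;  /* wait for the existing request to finish */
    }
    return 0;
}

static void complete(struct request *req, int ret)
{
    printf("request %p completed, ret=%d\n", (void *)req, ret);
}

static void next_io(struct request *req)
{
    printf("request %p advances to the next cluster\n", (void *)req);
}

/* Single dispatch point, mirroring the error handling added to
 * qed_aio_write_data() in the diff. */
static void dispatch(struct request *req)
{
    int ret = write_step(req);

    if (ret < 0) {
        if (ret != -EINPROGRESS) {
            complete(req, ret);   /* real error: finish the request now */
        }
        return;                   /* -EINPROGRESS: leave it queued */
    }
    next_io(req);                 /* success: continue with the next cluster */
}

int main(void)
{
    struct request active = { .queued = 0 };
    struct request waiting = { .queued = 1 };

    dispatch(&active);   /* prints the "advances" message */
    dispatch(&waiting);  /* prints nothing: the request stays queued */
    return 0;
}

With completion centralized in the dispatcher, callees only propagate negative errno values, and the -EINPROGRESS filter ensures a request sitting on the allocating-write queue is neither completed nor resumed until it is explicitly restarted.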