/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "block/block.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qapi/qmp/qjson.h"
#include "block/coroutine.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

void *block_job_create(const BlockJobDriver *driver, BlockDriverState *bs,
                       int64_t speed, BlockCompletionFunc *cb,
                       void *opaque, Error **errp)
{
    BlockJob *job;

    if (bs->job) {
        error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }
    bdrv_ref(bs);
    job = g_malloc0(driver->instance_size);
    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_lookup[driver->job_type]);
    bdrv_op_block_all(bs, job->blocker);
    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    job->driver = driver;
    job->bs = bs;
    job->cb = cb;
    job->opaque = opaque;
    job->busy = true;
    bs->job = job;

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            bs->job = NULL;
            bdrv_op_unblock_all(bs, job->blocker);
            error_free(job->blocker);
            g_free(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}

void block_job_completed(BlockJob *job, int ret)
{
    BlockDriverState *bs = job->bs;

    assert(bs->job == job);
    job->cb(job->opaque, ret);
    bs->job = NULL;
    bdrv_op_unblock_all(bs, job->blocker);
    error_free(job->blocker);
    g_free(job);
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;

    if (!job->driver->set_speed) {
        error_set(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
}

void block_job_complete(BlockJob *job, Error **errp)
{
    if (job->pause_count || job->cancelled || !job->driver->complete) {
        error_set(errp, QERR_BLOCK_JOB_NOT_READY,
                  bdrv_get_device_name(job->bs));
        return;
    }

    job->driver->complete(job, errp);
}
void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

bool block_job_is_paused(BlockJob *job)
{
    return job->pause_count > 0;
}

void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_enter(BlockJob *job)
{
    block_job_iostatus_reset(job);
    if (job->co && !job->busy) {
        qemu_coroutine_enter(job->co, NULL);
    }
}

void block_job_cancel(BlockJob *job)
{
    job->cancelled = true;
    block_job_enter(job);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_iostatus_reset(BlockJob *job)
{
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    if (job->driver->iostatus_reset) {
        job->driver->iostatus_reset(job);
    }
}

struct BlockFinishData {
    BlockJob *job;
    BlockCompletionFunc *cb;
    void *opaque;
    bool cancelled;
    int ret;
};

static void block_job_finish_cb(void *opaque, int ret)
{
    struct BlockFinishData *data = opaque;

    data->cancelled = block_job_is_cancelled(data->job);
    data->ret = ret;
    data->cb(data->opaque, ret);
}

static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    struct BlockFinishData data;
    BlockDriverState *bs = job->bs;
    Error *local_err = NULL;

    assert(bs->job == job);

    /* Set up our own callback to store the result and chain to
     * the original callback.
     */
    data.job = job;
    data.cb = job->cb;
    data.opaque = job->opaque;
    data.ret = -EINPROGRESS;
    job->cb = block_job_finish_cb;
    job->opaque = &data;
    finish(job, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EBUSY;
    }
    while (data.ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(bs), true);
    }
    return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (block_job_is_paused(job)) {
        qemu_coroutine_yield();
    } else {
        co_aio_sleep_ns(bdrv_get_aio_context(job->bs), type, ns);
    }
    job->busy = true;
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    qemu_coroutine_yield();
    job->busy = true;
}

BlockJobInfo *block_job_query(BlockJob *job)
{
    BlockJobInfo *info = g_new0(BlockJobInfo, 1);
    info->type = g_strdup(BlockJobType_lookup[job->driver->job_type]);
    info->device = g_strdup(bdrv_get_device_name(job->bs));
    info->len = job->len;
    info->busy = job->busy;
    info->paused = job->pause_count > 0;
    info->offset = job->offset;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job->ready;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void block_job_event_cancelled(BlockJob *job)
{
    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        bdrv_get_device_name(job->bs),
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

void block_job_event_completed(BlockJob *job, const char *msg)
{
    qapi_event_send_block_job_completed(job->driver->job_type,
                                        bdrv_get_device_name(job->bs),
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    bdrv_get_device_name(job->bs),
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockDriverState *bs,
                                        BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    qapi_event_send_block_job_error(bdrv_get_device_name(job->bs),
                                    is_read ? IO_OPERATION_TYPE_READ :
                                    IO_OPERATION_TYPE_WRITE,
                                    action, &error_abort);
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Make the pause user-visible; the job can then be resumed from QMP. */
        job->user_paused = true;
        block_job_pause(job);
        block_job_iostatus_set_err(job, error);
        if (bs != job->bs) {
            bdrv_iostatus_set_err(bs, error);
        }
    }
    return action;
}

typedef struct {
    BlockJob *job;
    QEMUBH *bh;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    qemu_bh_delete(data->bh);

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = bdrv_get_aio_context(data->job->bs);
    aio_context_acquire(aio_context);

    data->fn(data->job, data->opaque);

    aio_context_release(aio_context);

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->bh = qemu_bh_new(block_job_defer_to_main_loop_bh, data);
    data->aio_context = bdrv_get_aio_context(job->bs);
    data->fn = fn;
    data->opaque = opaque;

    qemu_bh_schedule(data->bh);
}
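
/*
 * Illustrative sketch, not part of the original file: how a hypothetical job
 * driver's coroutine might drive the helpers above.  It throttles itself with
 * block_job_sleep_ns(), polls block_job_is_cancelled(), and finishes through
 * block_job_defer_to_main_loop() so that block_job_completed() runs in the
 * main loop.  The ExampleJob type and example_job_* names are assumptions
 * made only for this sketch; it is compiled out with #if 0.
 */
#if 0
typedef struct ExampleJob {
    BlockJob common;                    /* embedded first, as block_job_create()
                                         * returns the allocation as a BlockJob */
    int ret;                            /* result passed to block_job_completed() */
} ExampleJob;

static void example_job_finish(BlockJob *job, void *opaque)
{
    ExampleJob *s = opaque;

    /* Invoked in the main loop via block_job_defer_to_main_loop() */
    block_job_completed(job, s->ret);
}

static void coroutine_fn example_job_run(void *opaque)
{
    ExampleJob *s = opaque;

    while (!block_job_is_cancelled(&s->common)) {
        /* ... do one chunk of work and update s->common.offset here ... */

        /* Yield so pause requests and rate limiting take effect */
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 100000000LL);
    }

    s->ret = 0;
    block_job_defer_to_main_loop(&s->common, example_job_finish, s);
}
#endif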