/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/aio-wait.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"

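/*
 * Not every Job is a BlockJob: only the four job types below are backed
 * by the BlockJob wrapper, so any other Job must not be cast to BlockJob.
 */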
static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}

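/*
 * Return the next block job after @bjob in the global job list, skipping
 * over non-block jobs; start from the head of the list when @bjob is NULL.
 */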
BlockJob *block_job_next_locked(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;
    GLOBAL_STATE_CODE();

    do {
        job = job_next_locked(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}

BlockJob *block_job_get_locked(const char *id)
{
    Job *job = job_get_locked(id);
    GLOBAL_STATE_CODE();

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    } else {
        return NULL;
    }
}

BlockJob *block_job_get(const char *id)
{
    JOB_LOCK_GUARD();
    return block_job_get_locked(id);
}

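/*
 * Job driver .free callback: detach all nodes and release the resources
 * that block_job_create() set up (rate limit, op blocker).
 */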
void block_job_free(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();

    block_job_remove_all_bdrv(bjob);
    ratelimit_destroy(&bjob->limit);
    error_free(bjob->blocker);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
}

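/*
 * Drained sections on any node the job is attached to pause the job:
 * drained_begin pauses it, drained_poll reports whether it has really
 * stopped issuing requests, and drained_end resumes it.
 */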
static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_pause(&job->job);
}

static bool child_job_drained_poll(BdrvChild *c)
{
    BlockJob *bjob = c->opaque;
    Job *job = &bjob->job;
    const BlockJobDriver *drv = block_job_driver(bjob);

    /* An inactive or completed job doesn't have any pending requests. Jobs
     * with !job->busy are either already paused or have a pause point after
     * being reentered, so no job driver code will run before they pause. */
    WITH_JOB_LOCK_GUARD() {
        if (!job->busy || job_is_completed_locked(job)) {
            return false;
        }
    }

    /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
     * override this assumption. */
    if (drv->drained_poll) {
        return drv->drained_poll(bjob);
    } else {
        return true;
    }
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_resume(&job->job);
}

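/*
 * Changing the AioContext of a block job is a two-phase graph operation:
 * the request is first checked against every node the job is attached to
 * and recorded in a transaction; the job's context is only actually
 * switched when the transaction commits.
 */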
typedef struct BdrvStateChildJobContext {
    AioContext *new_ctx;
    BlockJob *job;
} BdrvStateChildJobContext;

static void child_job_set_aio_ctx_commit(void *opaque)
{
    BdrvStateChildJobContext *s = opaque;
    BlockJob *job = s->job;

    job_set_aio_context(&job->job, s->new_ctx);
}

static TransactionActionDrv change_child_job_context = {
    .commit = child_job_set_aio_ctx_commit,
    .clean = g_free,
};

static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx,
                                     GHashTable *visited, Transaction *tran,
                                     Error **errp)
{
    BlockJob *job = c->opaque;
    BdrvStateChildJobContext *s;
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *sibling = l->data;
        if (!bdrv_child_change_aio_context(sibling, ctx, visited,
                                           tran, errp)) {
            return false;
        }
    }

    s = g_new(BdrvStateChildJobContext, 1);
    *s = (BdrvStateChildJobContext) {
        .new_ctx = ctx,
        .job = job,
    };

    tran_add(tran, &change_child_job_context, s);
    return true;
}

static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    IO_CODE();
    JOB_LOCK_GUARD();

    return job->job.aio_context;
}

static const BdrvChildClass child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_poll       = child_job_drained_poll,
    .drained_end        = child_job_drained_end,
    .change_aio_ctx     = child_job_change_aio_ctx,
    .stay_at_node       = true,
    .get_parent_aio_context = child_job_get_parent_aio_context,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    /*
     * bdrv_root_unref_child() may reach child_job_change_aio_ctx(),
     * which will also traverse job->nodes, so consume the list one by
     * one to make sure that such a concurrent access does not attempt
     * to process an already freed BdrvChild.
     */
    bdrv_graph_wrlock(NULL);
    while (job->nodes) {
        GSList *l = job->nodes;
        BdrvChild *c = l->data;

        job->nodes = l->next;

        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);

        g_slist_free_1(l);
    }
    bdrv_graph_wrunlock();
}

bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
{
    GSList *el;
    GLOBAL_STATE_CODE();

    for (el = job->nodes; el; el = el->next) {
        BdrvChild *c = el->data;
        if (c->bs == bs) {
            return true;
        }
    }

    return false;
}

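/*
 * Attach @bs to the job under the given child name. On success the job
 * takes a reference to the node, blocks operations on it via
 * job->blocker and records it in job->nodes so that
 * block_job_remove_all_bdrv() can undo all of this. If @bs lives in a
 * different AioContext than the job, the job's context lock is
 * temporarily dropped and the node's lock acquired around the attach.
 */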
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;
    AioContext *ctx = bdrv_get_aio_context(bs);
    bool need_context_ops;
    GLOBAL_STATE_CODE();

    bdrv_ref(bs);

    need_context_ops = ctx != job->job.aio_context;

    if (need_context_ops) {
        if (job->job.aio_context != qemu_get_aio_context()) {
            aio_context_release(job->job.aio_context);
        }
        aio_context_acquire(ctx);
    }
    c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                               errp);
    if (need_context_ops) {
        aio_context_release(ctx);
        if (job->job.aio_context != qemu_get_aio_context()) {
            aio_context_acquire(job->job.aio_context);
        }
    }
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

/* Called with job_mutex lock held. */
static void block_job_on_idle_locked(Notifier *n, void *opaque)
{
    aio_wait_kick();
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}

const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return container_of(job->job.driver, BlockJobDriver, job_driver);
}

/* Assumes the job_mutex is held */
static bool job_timer_pending(Job *job)
{
    return timer_pending(&job->sleep_timer);
}

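/*
 * Apply a new speed limit in bytes per second; 0 means unlimited. The
 * job is only woken up from a rate-limit sleep when the limit was raised
 * or lifted, so that it can use the larger budget right away.
 */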
bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
{
    const BlockJobDriver *drv = block_job_driver(job);
    int64_t old_speed = job->speed;

    GLOBAL_STATE_CODE();

    if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
        return false;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "speed",
                   "a non-negative value");
        return false;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;

    if (drv->set_speed) {
        job_unlock();
        drv->set_speed(job, speed);
        job_lock();
    }

    if (speed && speed <= old_speed) {
        return true;
    }

    /* kick only if a timer is pending */
    job_enter_cond_locked(&job->job, job_timer_pending);

    return true;
}

static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    JOB_LOCK_GUARD();
    return block_job_set_speed_locked(job, speed, errp);
}

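/*
 * Account @n bytes of progress against the rate limit; the resulting
 * delay is consumed by block_job_ratelimit_sleep() below. A job driver's
 * main loop typically pairs the two like this (a sketch only;
 * do_one_chunk() is a hypothetical driver function, not part of this
 * API):
 *
 *     while (more_work) {
 *         block_job_ratelimit_sleep(job);
 *         n = do_one_chunk(job);
 *         block_job_ratelimit_processed_bytes(job, n);
 *     }
 */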
void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n)
{
    IO_CODE();
    ratelimit_calculate_delay(&job->limit, n);
}

void block_job_ratelimit_sleep(BlockJob *job)
{
    uint64_t delay_ns;

    /*
     * Sleep at least once. If the job is reentered early, keep waiting until
     * we've waited for the full time that is necessary to keep the job at the
     * right speed.
     *
     * Make sure to recalculate the delay after each (possibly interrupted)
     * sleep because the speed can change while the job has yielded.
     */
    do {
        delay_ns = ratelimit_calculate_delay(&job->limit, 0);
        job_sleep_ns(&job->job, delay_ns);
    } while (delay_ns && !job_is_cancelled(&job->job));
}

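/*
 * Build the BlockJobInfo that QMP query-block-jobs reports for @job.
 * Internal jobs (those without an ID) are invisible to the user and
 * cannot be queried.
 */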
BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;
    uint64_t progress_current, progress_total;

    GLOBAL_STATE_CODE();

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(job_type_str(&job->job));
    info->device    = g_strdup(job->job.id);
    info->busy      = job->job.busy;
    info->paused    = job->job.pause_count > 0;
    info->offset    = progress_current;
    info->len       = progress_total;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job_is_ready_locked(&job->job);
    info->status    = job->job.status;
    info->auto_finalize = job->job.auto_finalize;
    info->auto_dismiss  = job->job.auto_dismiss;
    if (job->job.ret) {
        info->error = job->job.err ?
                        g_strdup(error_get_pretty(job->job.err)) :
                        g_strdup(strerror(-job->job.ret));
    }
    return info;
}

/* Called with job lock held */
static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

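/*
 * The notifiers below translate job state transitions into the
 * corresponding QMP events (BLOCK_JOB_CANCELLED, BLOCK_JOB_COMPLETED,
 * BLOCK_JOB_PENDING, BLOCK_JOB_READY). Internal jobs emit no events.
 */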
/* Called with job_mutex lock held. */
static void block_job_event_cancelled_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_cancelled(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed);
}

/* Called with job_mutex lock held. */
static void block_job_event_completed_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    const char *msg = NULL;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    if (job->job.ret < 0) {
        msg = error_get_pretty(job->job.err);
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_completed(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed,
                                        msg);
}

/* Called with job_mutex lock held. */
static void block_job_event_pending_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_pending(job_type(&job->job),
                                      job->job.id);
}

/* Called with job_mutex lock held. */
static void block_job_event_ready_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    progress_total,
                                    progress_current,
                                    job->speed);
}

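/*
 * Common setup for all block jobs: create the underlying Job, register
 * the QMP event notifiers, set up rate limiting and the op blocker, and
 * attach @bs as the "main node". Any failure after job_create() tears
 * the half-initialized job down again via job_early_fail().
 */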
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockJob *job;
    int ret;
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
    }

    job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
                     flags, cb, opaque, errp);
    if (job == NULL) {
        return NULL;
    }

    assert(is_block_job(&job->job));
    assert(job->job.driver->free == &block_job_free);
    assert(job->job.driver->user_resume == &block_job_user_resume);

    ratelimit_init(&job->limit);

    job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked;
    job->finalize_completed_notifier.notify = block_job_event_completed_locked;
    job->pending_notifier.notify = block_job_event_pending_locked;
    job->ready_notifier.notify = block_job_event_ready_locked;
    job->idle_notifier.notify = block_job_on_idle_locked;

    WITH_JOB_LOCK_GUARD() {
        notifier_list_add(&job->job.on_finalize_cancelled,
                          &job->finalize_cancelled_notifier);
        notifier_list_add(&job->job.on_finalize_completed,
                          &job->finalize_completed_notifier);
        notifier_list_add(&job->job.on_pending, &job->pending_notifier);
        notifier_list_add(&job->job.on_ready, &job->ready_notifier);
        notifier_list_add(&job->job.on_idle, &job->idle_notifier);
    }

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));

    ret = block_job_add_bdrv(job, "main node", bs, perm, shared_perm, errp);
    if (ret < 0) {
        goto fail;
    }

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    if (!block_job_set_speed(job, speed, errp)) {
        goto fail;
    }

    return job;

fail:
    job_early_fail(&job->job);
    return NULL;
}

void block_job_iostatus_reset_locked(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->job.user_paused && job->job.pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

static void block_job_iostatus_reset(BlockJob *job)
{
    JOB_LOCK_GUARD();
    block_job_iostatus_reset_locked(job);
}

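/*
 * Job driver .user_resume callback: a user-initiated resume (e.g. QMP
 * block-job-resume) also clears the I/O status that a STOP error action
 * set in block_job_error_action().
 */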
void block_job_user_resume(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();
    block_job_iostatus_reset(bjob);
}

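/*
 * Map an I/O error to the action configured by @on_err, emit the
 * BLOCK_JOB_ERROR QMP event for user-visible jobs, and for
 * BLOCK_ERROR_ACTION_STOP pause the job as if the user had paused it,
 * so that a later block-job-resume also resets the recorded iostatus.
 */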
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;
    IO_CODE();

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        WITH_JOB_LOCK_GUARD() {
            if (!job->job.user_paused) {
                job_pause_locked(&job->job);
                /*
                 * Make the pause user-visible so that it can be
                 * resumed from QMP.
                 */
                job->job.user_paused = true;
            }
            block_job_iostatus_set_err_locked(job, error);
        }
    }
    return action;
}

AioContext *block_job_get_aio_context(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    return job->job.aio_context;
}