/openbmc/qemu/block/
io_uring.c
     53  AioContext *aio_context;  (member)
    218  assert(luringcb->co->ctx == s->aio_context);  in luring_process_completions()
    413  s->aio_context = NULL;  in luring_detach_aio_context()
    418  s->aio_context = new_context;  in luring_attach_aio_context()
    420  aio_set_fd_handler(s->aio_context, s->ring.ring_fd,  in luring_attach_aio_context()
linux-aio.c
     58  AioContext *aio_context;  (member)
    107  assert(laiocb->co->ctx == laiocb->ctx->aio_context);  in qemu_laio_process_completion()
    346  uint64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;  in laio_max_batch()
    440  s->aio_context = NULL;  in laio_detach_aio_context()
    445  s->aio_context = new_context;  in laio_attach_aio_context()
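The io_uring.c and linux-aio.c hits above share one shape: the backend state caches the AioContext it is attached to, the detach hook clears it, and the completion path asserts that it is running in that same context. The following is a minimal sketch of that shape; MyAioBackend and the my_* helpers are invented names modelling the pattern, not QEMU API.

    #include "qemu/osdep.h"
    #include "block/aio.h"

    typedef struct MyAioBackend {
        AioContext *aio_context;    /* event loop this backend is currently bound to */
    } MyAioBackend;

    static void my_backend_attach_aio_context(MyAioBackend *s, AioContext *new_context)
    {
        s->aio_context = new_context;
        /* The real backends also (re)register their fd handlers on new_context here. */
    }

    static void my_backend_detach_aio_context(MyAioBackend *s)
    {
        /* The real backends first unregister their fd handlers from s->aio_context. */
        s->aio_context = NULL;
    }

    static void my_backend_process_completions(MyAioBackend *s)
    {
        /* Completions must run in the context the backend is attached to. */
        assert(qemu_get_current_aio_context() == s->aio_context);
        /* ... reap completed requests and wake their coroutines ... */
    }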
nfs.c
     60  AioContext *aio_context;  (member)
    195  aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),  in nfs_set_events()
    259  replay_bh_schedule_oneshot_event(task->client->aio_context,  in nfs_co_generic_cb()
    372  aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),  in nfs_detach_aio_context()
    382  client->aio_context = new_context;  in nfs_attach_aio_context()
    390  aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),  in nfs_client_close()
    613  client->aio_context = bdrv_get_aio_context(bs);  in nfs_file_open()
    650  client->aio_context = qemu_get_aio_context();  in nfs_file_co_create()
    726  replay_bh_schedule_oneshot_event(task->client->aio_context,  in nfs_get_allocated_file_size_cb()
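nfs.c uses the cached client context both to (re)register the libnfs fd handler and to complete requests: the library callback fires outside the request coroutine, so completion is bounced through a one-shot bottom half on that context, which then re-enters the coroutine. A sketch of the completion half of that pattern, with invented MyClient/MyTask names:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "qemu/coroutine.h"
    #include "sysemu/replay.h"

    typedef struct MyClient {
        AioContext *aio_context;
    } MyClient;

    typedef struct MyTask {
        MyClient *client;
        Coroutine *co;          /* request coroutine waiting for completion */
        bool complete;
    } MyTask;

    static void my_task_restart_bh(void *opaque)
    {
        MyTask *task = opaque;

        task->complete = true;
        aio_co_wake(task->co);      /* resume the coroutine in its own context */
    }

    /* Called from the library callback, possibly outside the request's context. */
    static void my_generic_cb(MyTask *task)
    {
        replay_bh_schedule_oneshot_event(task->client->aio_context,
                                         my_task_restart_bh, task);
    }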
throttle-groups.c
    446  aio_co_enter(tgm->aio_context, co);  in throttle_group_restart_queue()
    552  tgm->aio_context = ctx;  in throttle_group_register_tgm()
    567  tgm->aio_context,  in throttle_group_register_tgm()
    598  AIO_WAIT_WHILE(tgm->aio_context, qatomic_read(&tgm->restart_pending) > 0);  in throttle_group_unregister_tgm()
    629  tgm->aio_context = new_context;  in throttle_group_attach_aio_context()
    655  tgm->aio_context = NULL;  in throttle_group_detach_aio_context()
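throttle-groups.c shows the restart-and-drain side of the pattern: queued request coroutines are entered in the member's AioContext, and unregistering blocks until every pending restart has run. A sketch under invented names (the restarted coroutine is expected to decrement the counter when it finishes, which is elided here):

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "block/aio-wait.h"
    #include "qemu/atomic.h"
    #include "qemu/coroutine.h"

    typedef struct MyGroupMember {
        AioContext *aio_context;
        int restart_pending;        /* accessed with qatomic_* */
    } MyGroupMember;

    static void my_member_restart_queue(MyGroupMember *m, Coroutine *co)
    {
        qatomic_inc(&m->restart_pending);
        /* Run the queued coroutine in the member's event loop; it calls
         * qatomic_dec(&m->restart_pending) when done (not shown). */
        aio_co_enter(m->aio_context, co);
    }

    static void my_member_unregister(MyGroupMember *m)
    {
        /* Poll until every scheduled restart has completed. */
        AIO_WAIT_WHILE(m->aio_context, qatomic_read(&m->restart_pending) > 0);
        m->aio_context = NULL;
    }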
curl.c
    118  AioContext *aio_context;  (member)
    135  aio_set_fd_handler(s->aio_context, socket->fd,  in curl_drop_socket()
    183  aio_set_fd_handler(s->aio_context, fd,  in curl_sock_cb()
    187  aio_set_fd_handler(s->aio_context, fd,  in curl_sock_cb()
    191  aio_set_fd_handler(s->aio_context, fd,  in curl_sock_cb()
    196  aio_set_fd_handler(s->aio_context, fd,  in curl_sock_cb()
    607  s->aio_context = new_context;  in curl_attach_aio_context()
    787  s->aio_context = bdrv_get_aio_context(bs);  in curl_open()
nvme.c
     99  AioContext *aio_context;  (member)
    221  AioContext *aio_context,  in nvme_create_queue_pair() (argument)
    236  trace_nvme_create_queue_pair(idx, q, size, aio_context,  in nvme_create_queue_pair()
    250  q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);  in nvme_create_queue_pair()
    347  replay_bh_schedule_oneshot_event(q->s->aio_context,  in nvme_wake_free_req_locked()
    524  AioContext *aio_context = bdrv_get_aio_context(bs);  in nvme_admin_cmd_sync() (local)
    533  AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);  in nvme_admin_cmd_sync()
    748  AioContext *aio_context = bdrv_get_aio_context(bs);  in nvme_init() (local)
    760  s->aio_context = bdrv_get_aio_context(bs);  in nvme_init()
    838  q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);  in nvme_init()
    [all …]
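nvme.c illustrates two further uses of the context: a per-queue completion bottom half created with aio_bh_new(), and a synchronous admin command that polls the context with AIO_WAIT_WHILE() until the request is done. A condensed, self-consistent sketch; MyQueuePair and its busy flag are invented for illustration:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "block/aio-wait.h"

    typedef struct MyQueuePair {
        AioContext *aio_context;
        QEMUBH *completion_bh;
        bool busy;                  /* cleared by the completion bottom half */
    } MyQueuePair;

    static void my_process_completion_bh(void *opaque)
    {
        MyQueuePair *q = opaque;

        /* ... scan the completion queue; here we simply mark the command done ... */
        q->busy = false;
    }

    static void my_queue_pair_init(MyQueuePair *q, AioContext *aio_context)
    {
        q->aio_context = aio_context;
        q->completion_bh = aio_bh_new(aio_context, my_process_completion_bh, q);
    }

    static void my_cmd_sync(MyQueuePair *q)
    {
        q->busy = true;
        /* ... submit the command to the device ... */
        qemu_bh_schedule(q->completion_bh);
        /* Poll the queue's event loop until the completion handler has run. */
        AIO_WAIT_WHILE(q->aio_context, q->busy);
    }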
iscsi.c
     68  AioContext *aio_context;  (member)
    177  acb->bh = aio_bh_new(acb->iscsilun->aio_context, iscsi_bh_cb, acb);  in iscsi_schedule_bh()
    266  aio_timer_init(iTask->iscsilun->aio_context,  in iscsi_co_generic_cb()
    288  replay_bh_schedule_oneshot_event(iTask->iscsilun->aio_context,  in iscsi_co_generic_cb()
    365  aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsi),  in iscsi_set_events()
   1542  aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),  in iscsi_detach_aio_context()
   1561  iscsilun->aio_context = new_context;  in iscsi_attach_aio_context()
   1565  iscsilun->nop_timer = aio_timer_new(iscsilun->aio_context,  in iscsi_attach_aio_context()
   1573  iscsilun->event_timer = aio_timer_new(iscsilun->aio_context,  in iscsi_attach_aio_context()
   1906  iscsilun->aio_context = bdrv_get_aio_context(bs);  in iscsi_open()
    [all …]
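iscsi.c adds timers to the picture: QEMUTimer objects are bound to an AioContext, so the NOP and event timers are torn down in the detach hook and re-created against the new context in the attach hook. A sketch of that lifecycle with an invented keep-alive timer (MyLun and the interval are illustrative):

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "qemu/timer.h"

    #define MY_KEEPALIVE_INTERVAL_MS 5000

    typedef struct MyLun {
        AioContext *aio_context;
        QEMUTimer *keepalive_timer;
    } MyLun;

    static void my_keepalive_cb(void *opaque)
    {
        MyLun *lun = opaque;

        /* ... send a keep-alive request ... */
        timer_mod(lun->keepalive_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + MY_KEEPALIVE_INTERVAL_MS);
    }

    static void my_lun_detach_aio_context(MyLun *lun)
    {
        timer_free(lun->keepalive_timer);
        lun->keepalive_timer = NULL;
        lun->aio_context = NULL;
    }

    static void my_lun_attach_aio_context(MyLun *lun, AioContext *new_context)
    {
        lun->aio_context = new_context;
        lun->keepalive_timer = aio_timer_new(lun->aio_context, QEMU_CLOCK_REALTIME,
                                             SCALE_MS, my_keepalive_cb, lun);
        timer_mod(lun->keepalive_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + MY_KEEPALIVE_INTERVAL_MS);
    }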
gluster.c
     59  AioContext *aio_context;  (member)
    746  aio_co_schedule(acb->aio_context, acb->coroutine);  in gluster_finish_aiocb()
   1013  acb.aio_context = bdrv_get_aio_context(bs);  in qemu_gluster_co_pwrite_zeroes()
   1191  acb.aio_context = bdrv_get_aio_context(bs);  in qemu_gluster_co_rw()
   1258  acb.aio_context = bdrv_get_aio_context(bs);  in qemu_gluster_co_flush_to_disk()
   1306  acb.aio_context = bdrv_get_aio_context(bs);  in qemu_gluster_co_pdiscard()
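gluster.c demonstrates the "capture the context, schedule the coroutine from a foreign thread" variant: the request coroutine records its node's AioContext in the ACB and yields, and the library's completion callback, which may run in a library thread, schedules it back onto that context. A sketch with invented names:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "qemu/coroutine.h"

    typedef struct MyAIOCB {
        int ret;
        Coroutine *coroutine;
        AioContext *aio_context;
    } MyAIOCB;

    /* Completion callback invoked by the external library, possibly from a
     * thread that is not running any AioContext. */
    static void my_finish_aiocb(MyAIOCB *acb, int ret)
    {
        acb->ret = ret;
        aio_co_schedule(acb->aio_context, acb->coroutine);
    }

    static int coroutine_fn my_co_request(AioContext *bs_ctx)
    {
        MyAIOCB acb = {
            .coroutine = qemu_coroutine_self(),
            .aio_context = bs_ctx,  /* bdrv_get_aio_context(bs) in the real drivers */
        };

        /* ... submit the request to the library with &acb as the cookie ... */
        qemu_coroutine_yield();     /* woken by my_finish_aiocb() */
        return acb.ret;
    }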
commit.c
    394  s->base = blk_new(s->common.job.aio_context,  in commit_start()
    406  s->top = blk_new(s->common.job.aio_context, 0, BLK_PERM_ALL);  in commit_start()
replication.c
    150  assert(commit_job->aio_context == qemu_get_current_aio_context());  in replication_close()
trace-events
    157  nvme_create_queue_pair(unsigned q_index, void *q, size_t size, void *aio_context, int fd) "index %u…
mirror.c
   1859  s->target = blk_new(s->common.job.aio_context,  in mirror_start_job()
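The commit.c and mirror.c hits above show how block jobs pass their own context to blk_new(), so the BlockBackend's requests run in the same event loop as the job coroutine. A small sketch of that step (the wrapper function name is invented; permissions follow the 0/BLK_PERM_ALL combination visible above):

    #include "qemu/osdep.h"
    #include "qemu/job.h"
    #include "sysemu/block-backend.h"

    static BlockBackend *my_job_blk_for(Job *job, BlockDriverState *bs, Error **errp)
    {
        /* Create the backend in the job's event loop, then attach the node. */
        BlockBackend *blk = blk_new(job->aio_context, 0, BLK_PERM_ALL);

        if (blk_insert_bs(blk, bs, errp) < 0) {
            blk_unref(blk);
            return NULL;
        }
        return blk;
    }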
/openbmc/qemu/
blockdev.c
   1524  AioContext *aio_context;  in external_snapshot_abort() (local)
   1528  aio_context = bdrv_get_aio_context(state->old_bs);  in external_snapshot_abort()
   1540  if (aio_context != tmp_context) {  in external_snapshot_abort()
   1542  aio_context, NULL, NULL);  in external_snapshot_abort()
   1577  AioContext *aio_context,
   1597  AioContext *aio_context;  in drive_backup_action() (local)
   1624  aio_context = bdrv_get_aio_context(bs);  in drive_backup_action()
   1711  ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);  in drive_backup_action()
   1724  bs, target_bs, aio_context,  in drive_backup_action()
   1780  AioContext *aio_context;  in blockdev_backup_action() (local)
    [all …]
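blockdev.c shows the management-layer side: before wiring a source and a backup target into one job, drive_backup_action() moves the target node into the source's AioContext with bdrv_try_change_aio_context(). A condensed sketch of just that step (the wrapper name is invented; locking and the surrounding transaction logic are omitted):

    #include "qemu/osdep.h"
    #include "block/block.h"

    static int my_align_target_context(BlockDriverState *bs,
                                       BlockDriverState *target_bs,
                                       Error **errp)
    {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        if (bdrv_get_aio_context(target_bs) == aio_context) {
            return 0;   /* already in the same event loop */
        }
        return bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
    }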
job.c
    374  job->aio_context = ctx;  in job_set_aio_context()
    415  job->aio_context = ctx;  in job_create()
    585  next_aio_context = job->aio_context;  in job_do_yield_locked()
    595  next_aio_context = job->aio_context;  in job_do_yield_locked()
   1103  assert(job->aio_context == qemu_get_current_aio_context());  in job_co_entry()
   1128  aio_co_enter(job->aio_context, job->co);  in job_start()
   1256  AIO_WAIT_WHILE_UNLOCKED(job->aio_context,  in job_finish_sync_locked()
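job.c keeps the job's event loop in job->aio_context: job_start() enters the job coroutine there, and synchronous callers poll with AIO_WAIT_WHILE_UNLOCKED() until the job is finished. A minimal sketch of such a wait (the wrapper name is invented, and the real code checks the completion condition under the job mutex):

    #include "qemu/osdep.h"
    #include "block/aio-wait.h"
    #include "qemu/job.h"

    /* Poll the event loops until the job's coroutine has finished running. */
    static void my_job_wait_finished(Job *job)
    {
        AIO_WAIT_WHILE_UNLOCKED(job->aio_context, !job_is_completed(job));
    }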
blockjob.c
    179  return job->job.aio_context;  in child_job_get_parent_aio_context()
    629  return job->job.aio_context;  in block_job_get_aio_context()
block.c
    426  bs->aio_context = qemu_get_aio_context();  in bdrv_new()
   7425  return bs ? bs->aio_context : qemu_get_aio_context();  in bdrv_get_aio_context()
   7483  bs->aio_context = NULL;  in bdrv_detach_aio_context()
   7492  bs->aio_context = new_context;  in bdrv_attach_aio_context()
qemu-img.c
    917  AioContext *aio_context = block_job_get_aio_context(job);  in run_block_job() (local)
    925  aio_poll(aio_context, true);  in run_block_job()
/openbmc/qemu/include/block/
throttle-groups.h
     37  AioContext *aio_context;  (member)
block_int-common.h
   1084  AioContext *aio_context; /* event loop used for fd handlers, timers, etc */  (member)
/openbmc/qemu/util/
qemu-timer.c
    677  AioContext *aio_context;  in qemu_clock_advance_virtual_time() (local)
    680  aio_context = qemu_get_aio_context();  in qemu_clock_advance_virtual_time()
    694  timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);  in qemu_clock_advance_virtual_time()
throttle.c
    236  AioContext *aio_context,  in throttle_timers_init() (argument)
    249  throttle_timers_attach_aio_context(tt, aio_context);  in throttle_timers_init()
/openbmc/qemu/include/qemu/
throttle.h
    126  AioContext *aio_context,
job.h
     86  AioContext *aio_context;  (member)
/openbmc/qemu/tests/unit/
test-block-iothread.c
    517  g_assert(qemu_get_current_aio_context() == job->aio_context);  in test_job_run()
    528  g_assert(qemu_get_current_aio_context() == job->aio_context);  in test_job_run()
    773  g_assert(job->aio_context == ctx);  in test_propagate_mirror()