Lines matching refs:q — every reference to the identifier q (the NVMeQueuePair / NVMeQueue pointer) in QEMU's block/nvme.c userspace NVMe driver. Each hit shows the source line number, the code, and the enclosing function; "argument" and "local" mark where q is defined.
168 static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q, in nvme_init_queue() argument
176 q->head = q->tail = 0; in nvme_init_queue()
177 q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes); in nvme_init_queue()
178 if (!q->queue) { in nvme_init_queue()
182 memset(q->queue, 0, bytes); in nvme_init_queue()
183 r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp); in nvme_init_queue()
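nvme_init_queue() (lines 168-183 above) sets up one ring: the buffer is allocated page-aligned, zeroed, and then handed to qemu_vfio_dma_map() so the device receives an IOVA it can DMA into. A minimal standalone sketch of the allocation half, assuming a POSIX host; the VFIO step is QEMU-specific and only noted in a comment, and alloc_queue_ring is a hypothetical name:

    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    /* Hypothetical helper mirroring the allocation in nvme_init_queue().
     * QEMU then calls qemu_vfio_dma_map() to obtain the IOVA stored in
     * q->iova; that DMA-mapping step is omitted here. */
    static void *alloc_queue_ring(size_t nentries, size_t entry_bytes,
                                  size_t *bytes_out)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t bytes = nentries * entry_bytes;
        void *ring = NULL;

        bytes = (bytes + page - 1) & ~(page - 1);  /* round up to a full page */
        if (posix_memalign(&ring, page, bytes) != 0) {
            return NULL;
        }
        memset(ring, 0, bytes);   /* fresh ring: all phase bits start at 0 */
        *bytes_out = bytes;
        return ring;
    }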
190 static void nvme_free_queue(NVMeQueue *q) in nvme_free_queue() argument
192 qemu_vfree(q->queue); in nvme_free_queue()
195 static void nvme_free_queue_pair(NVMeQueuePair *q) in nvme_free_queue_pair() argument
197 trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq); in nvme_free_queue_pair()
198 if (q->completion_bh) { in nvme_free_queue_pair()
199 qemu_bh_delete(q->completion_bh); in nvme_free_queue_pair()
201 nvme_free_queue(&q->sq); in nvme_free_queue_pair()
202 nvme_free_queue(&q->cq); in nvme_free_queue_pair()
203 qemu_vfree(q->prp_list_pages); in nvme_free_queue_pair()
204 qemu_mutex_destroy(&q->lock); in nvme_free_queue_pair()
205 g_free(q); in nvme_free_queue_pair()
210 NVMeQueuePair *q = opaque; in nvme_free_req_queue_cb() local
212 qemu_mutex_lock(&q->lock); in nvme_free_req_queue_cb()
213 while (q->free_req_head != -1 && in nvme_free_req_queue_cb()
214 qemu_co_enter_next(&q->free_req_queue, &q->lock)) { in nvme_free_req_queue_cb()
217 qemu_mutex_unlock(&q->lock); in nvme_free_req_queue_cb()
227 NVMeQueuePair *q; in nvme_create_queue_pair() local
231 q = g_try_new0(NVMeQueuePair, 1); in nvme_create_queue_pair()
232 if (!q) { in nvme_create_queue_pair()
236 trace_nvme_create_queue_pair(idx, q, size, aio_context, in nvme_create_queue_pair()
240 q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes); in nvme_create_queue_pair()
241 if (!q->prp_list_pages) { in nvme_create_queue_pair()
245 memset(q->prp_list_pages, 0, bytes); in nvme_create_queue_pair()
246 qemu_mutex_init(&q->lock); in nvme_create_queue_pair()
247 q->s = s; in nvme_create_queue_pair()
248 q->index = idx; in nvme_create_queue_pair()
249 qemu_co_queue_init(&q->free_req_queue); in nvme_create_queue_pair()
250 q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q); in nvme_create_queue_pair()
251 r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes, in nvme_create_queue_pair()
257 q->free_req_head = -1; in nvme_create_queue_pair()
259 NVMeRequest *req = &q->reqs[i]; in nvme_create_queue_pair()
261 req->free_req_next = q->free_req_head; in nvme_create_queue_pair()
262 q->free_req_head = i; in nvme_create_queue_pair()
263 req->prp_list_page = q->prp_list_pages + i * s->page_size; in nvme_create_queue_pair()
267 if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) { in nvme_create_queue_pair()
270 q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail; in nvme_create_queue_pair()
272 if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) { in nvme_create_queue_pair()
275 q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head; in nvme_create_queue_pair()
277 return q; in nvme_create_queue_pair()
279 nvme_free_queue_pair(q); in nvme_create_queue_pair()
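nvme_create_queue_pair() (lines 227-279) builds the request pool as an index-linked freelist: every NVMeRequest stores the index of the next free request in free_req_next, -1 marks the end of the list, and each request owns one page of the shared prp_list_pages buffer for its PRP lists. A self-contained sketch of that initialization, with simplified types and illustrative constants:

    #include <stddef.h>

    #define NUM_REQS   127        /* illustrative pool size */
    #define PAGE_SIZE  4096       /* stands in for s->page_size */

    struct req {
        int   free_req_next;      /* index of next free request, -1 = end */
        void *prp_list_page;      /* this request's slice of the PRP buffer */
    };

    struct queue_pair {
        int        free_req_head; /* index of first free request, -1 = empty */
        struct req reqs[NUM_REQS];
    };

    /* Push every request onto the freelist and hand it its PRP page,
     * mirroring the loop at lines 257-263. */
    static void init_free_list(struct queue_pair *q, unsigned char *prp_pages)
    {
        q->free_req_head = -1;
        for (int i = 0; i < NUM_REQS; i++) {
            struct req *req = &q->reqs[i];
            req->free_req_next = q->free_req_head;
            q->free_req_head = i;
            req->prp_list_page = prp_pages + (size_t)i * PAGE_SIZE;
        }
    }

block/nvme.c also assigns each request a command identifier in this loop, which is why completion can later find the request as reqs[cid - 1] (line 429).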
284 static void nvme_kick(NVMeQueuePair *q) in nvme_kick() argument
286 BDRVNVMeState *s = q->s; in nvme_kick()
288 if (!q->need_kick) { in nvme_kick()
291 trace_nvme_kick(s, q->index); in nvme_kick()
292 assert(!(q->sq.tail & 0xFF00)); in nvme_kick()
295 *q->sq.doorbell = cpu_to_le32(q->sq.tail); in nvme_kick()
296 q->inflight += q->need_kick; in nvme_kick()
297 q->need_kick = 0; in nvme_kick()
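nvme_kick() (lines 284-297) batches doorbell writes: need_kick counts commands queued since the last kick, and a single little-endian store of the new SQ tail publishes the whole batch to the device. A sketch of the pattern with hypothetical struct names; the fence stands in for QEMU's smp_wmb(), and a little-endian host is assumed so cpu_to_le32() can be dropped:

    #include <stdatomic.h>
    #include <stdint.h>

    struct sq {
        volatile uint32_t *doorbell;  /* SQ tail doorbell mapped from the BAR */
        unsigned tail;
        unsigned need_kick;           /* submissions since the last doorbell */
        unsigned inflight;
    };

    static void sq_kick(struct sq *sq)
    {
        if (!sq->need_kick) {
            return;                   /* nothing new to publish */
        }
        /* Order the SQ entry writes before the doorbell store. */
        atomic_thread_fence(memory_order_release);
        *sq->doorbell = sq->tail;
        sq->inflight += sq->need_kick;
        sq->need_kick = 0;
    }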
300 static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q) in nvme_get_free_req_nofail_locked() argument
304 req = &q->reqs[q->free_req_head]; in nvme_get_free_req_nofail_locked()
305 q->free_req_head = req->free_req_next; in nvme_get_free_req_nofail_locked()
311 static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q) in nvme_get_free_req_nowait() argument
313 QEMU_LOCK_GUARD(&q->lock); in nvme_get_free_req_nowait()
314 if (q->free_req_head == -1) { in nvme_get_free_req_nowait()
317 return nvme_get_free_req_nofail_locked(q); in nvme_get_free_req_nowait()
324 static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q) in nvme_get_free_req() argument
326 QEMU_LOCK_GUARD(&q->lock); in nvme_get_free_req()
328 while (q->free_req_head == -1) { in nvme_get_free_req()
329 trace_nvme_free_req_queue_wait(q->s, q->index); in nvme_get_free_req()
330 qemu_co_queue_wait(&q->free_req_queue, &q->lock); in nvme_get_free_req()
333 return nvme_get_free_req_nofail_locked(q); in nvme_get_free_req()
337 static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req) in nvme_put_free_req_locked() argument
339 req->free_req_next = q->free_req_head; in nvme_put_free_req_locked()
340 q->free_req_head = req - q->reqs; in nvme_put_free_req_locked()
344 static void nvme_wake_free_req_locked(NVMeQueuePair *q) in nvme_wake_free_req_locked() argument
346 if (!qemu_co_queue_empty(&q->free_req_queue)) { in nvme_wake_free_req_locked()
347 replay_bh_schedule_oneshot_event(q->s->aio_context, in nvme_wake_free_req_locked()
348 nvme_free_req_queue_cb, q); in nvme_wake_free_req_locked()
353 static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req) in nvme_put_free_req_and_wake() argument
355 qemu_mutex_lock(&q->lock); in nvme_put_free_req_and_wake()
356 nvme_put_free_req_locked(q, req); in nvme_put_free_req_and_wake()
357 nvme_wake_free_req_locked(q); in nvme_put_free_req_and_wake()
358 qemu_mutex_unlock(&q->lock); in nvme_put_free_req_and_wake()
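nvme_get_free_req() and nvme_put_free_req_and_wake() (lines 324-358) form a wait/notify pair around the freelist, built on QEMU coroutines: a getter sleeps on free_req_queue, releasing q->lock while suspended (line 330), and a releaser pushes the slot back and schedules nvme_free_req_queue_cb (lines 210-217) to wake waiters. A rough pthread condition-variable analogue of the same discipline (illustrative only, not the coroutine machinery):

    #include <pthread.h>

    struct pool {
        pthread_mutex_t lock;
        pthread_cond_t  can_alloc;   /* stands in for q->free_req_queue */
        int             free_head;   /* -1 when no request is free */
        int             next[127];   /* free_req_next links, by index */
    };

    /* Analogue of nvme_get_free_req(): block until a slot is free. */
    static int pool_get(struct pool *p)
    {
        pthread_mutex_lock(&p->lock);
        while (p->free_head == -1) {
            pthread_cond_wait(&p->can_alloc, &p->lock); /* drops lock while asleep */
        }
        int i = p->free_head;
        p->free_head = p->next[i];
        pthread_mutex_unlock(&p->lock);
        return i;
    }

    /* Analogue of nvme_put_free_req_and_wake(): return the slot, wake a waiter. */
    static void pool_put(struct pool *p, int i)
    {
        pthread_mutex_lock(&p->lock);
        p->next[i] = p->free_head;
        p->free_head = i;
        pthread_cond_signal(&p->can_alloc);
        pthread_mutex_unlock(&p->lock);
    }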
384 static bool nvme_process_completion(NVMeQueuePair *q) in nvme_process_completion() argument
386 BDRVNVMeState *s = q->s; in nvme_process_completion()
392 trace_nvme_process_completion(s, q->index, q->inflight); in nvme_process_completion()
402 qemu_bh_schedule(q->completion_bh); in nvme_process_completion()
404 assert(q->inflight >= 0); in nvme_process_completion()
405 while (q->inflight) { in nvme_process_completion()
409 c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES]; in nvme_process_completion()
410 if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) { in nvme_process_completion()
417 q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE; in nvme_process_completion()
418 if (!q->cq.head) { in nvme_process_completion()
419 q->cq_phase = !q->cq_phase; in nvme_process_completion()
428 trace_nvme_complete_command(s, q->index, cid); in nvme_process_completion()
429 preq = &q->reqs[cid - 1]; in nvme_process_completion()
433 nvme_put_free_req_locked(q, preq); in nvme_process_completion()
435 q->inflight--; in nvme_process_completion()
436 qemu_mutex_unlock(&q->lock); in nvme_process_completion()
438 qemu_mutex_lock(&q->lock); in nvme_process_completion()
444 *q->cq.doorbell = cpu_to_le32(q->cq.head); in nvme_process_completion()
445 nvme_wake_free_req_locked(q); in nvme_process_completion()
448 qemu_bh_cancel(q->completion_bh); in nvme_process_completion()
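nvme_process_completion() (lines 384-448) is a standard NVMe phase-tag consumer: the CQE at cq.head is valid only while its phase bit differs from the locally tracked cq_phase, and cq_phase flips each time head wraps to zero (lines 417-419). The CQ head doorbell is written once per batch, q->lock is dropped around each request callback (lines 436-438), and completion_bh is scheduled up front so a re-entrant call can restart processing safely. A self-contained sketch of the ring-draining core, minus locking and callback dispatch, assuming a little-endian host:

    #include <stdint.h>
    #include <stdbool.h>

    #define CQ_SIZE 128

    struct cqe {                      /* minimal stand-in for NvmeCqe */
        uint16_t cid;
        uint16_t status;              /* bit 0 is the phase tag */
    };

    struct cq {
        struct cqe ring[CQ_SIZE];
        unsigned   head;
        int        phase;             /* phase of *stale* entries, like cq_phase */
        volatile uint32_t *doorbell;  /* CQ head doorbell (mocked here) */
    };

    /* Consume every completed entry; returns true on progress. */
    static bool cq_drain(struct cq *cq)
    {
        bool progress = false;

        for (;;) {
            struct cqe *c = &cq->ring[cq->head];
            if ((c->status & 0x1) == cq->phase) {
                break;                /* device has not written this slot yet */
            }
            cq->head = (cq->head + 1) % CQ_SIZE;
            if (!cq->head) {
                cq->phase = !cq->phase; /* wrapped: next pass uses the other tag */
            }
            /* ... look up the request via c->cid and run its callback ... */
            progress = true;
        }
        if (progress) {
            *cq->doorbell = cq->head; /* tell the device how far we have read */
        }
        return progress;
    }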
455 NVMeQueuePair *q = opaque; in nvme_process_completion_bh() local
463 *q->cq.doorbell = cpu_to_le32(q->cq.head); in nvme_process_completion_bh()
464 nvme_wake_free_req_locked(q); in nvme_process_completion_bh()
466 nvme_process_completion(q); in nvme_process_completion_bh()
485 NVMeQueuePair *q = opaque; in nvme_deferred_fn() local
487 QEMU_LOCK_GUARD(&q->lock); in nvme_deferred_fn()
488 nvme_kick(q); in nvme_deferred_fn()
489 nvme_process_completion(q); in nvme_deferred_fn()
492 static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req, in nvme_submit_command() argument
501 trace_nvme_submit_command(q->s, q->index, req->cid); in nvme_submit_command()
503 qemu_mutex_lock(&q->lock); in nvme_submit_command()
504 memcpy((uint8_t *)q->sq.queue + in nvme_submit_command()
505 q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd)); in nvme_submit_command()
506 q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE; in nvme_submit_command()
507 q->need_kick++; in nvme_submit_command()
508 qemu_mutex_unlock(&q->lock); in nvme_submit_command()
510 defer_call(nvme_deferred_fn, q); in nvme_submit_command()
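nvme_submit_command() (lines 492-510) enqueues under q->lock: copy the 64-byte command into the slot at sq.tail, advance tail modulo the queue size, and bump need_kick; the doorbell write itself is deferred through defer_call(nvme_deferred_fn, q) so back-to-back submissions share a single kick. A sketch of the enqueue step with simplified types:

    #include <stdint.h>
    #include <string.h>

    #define SQ_SIZE       128
    #define SQ_ENTRY_SIZE 64          /* NVMe submission entries are 64 bytes */

    struct sq_ring {
        uint8_t  queue[SQ_SIZE * SQ_ENTRY_SIZE];
        unsigned tail;
        unsigned need_kick;           /* commands queued since the last doorbell */
    };

    /* Enqueue one command; the caller holds the queue lock and later rings
     * the doorbell once for the whole batch (see the sq_kick sketch above). */
    static void sq_enqueue(struct sq_ring *sq, const void *cmd)
    {
        memcpy(sq->queue + sq->tail * SQ_ENTRY_SIZE, cmd, SQ_ENTRY_SIZE);
        sq->tail = (sq->tail + 1) % SQ_SIZE;
        sq->need_kick++;
    }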
523 NVMeQueuePair *q = s->queues[INDEX_ADMIN]; in nvme_admin_cmd_sync() local
527 req = nvme_get_free_req_nowait(q); in nvme_admin_cmd_sync()
531 nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret); in nvme_admin_cmd_sync()
628 static void nvme_poll_queue(NVMeQueuePair *q) in nvme_poll_queue() argument
630 const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES; in nvme_poll_queue()
631 NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset]; in nvme_poll_queue()
633 trace_nvme_poll_queue(q->s, q->index); in nvme_poll_queue()
639 if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) { in nvme_poll_queue()
643 qemu_mutex_lock(&q->lock); in nvme_poll_queue()
644 while (nvme_process_completion(q)) { in nvme_poll_queue()
647 qemu_mutex_unlock(&q->lock); in nvme_poll_queue()
673 NVMeQueuePair *q; in nvme_add_io_queue() local
678 q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs), in nvme_add_io_queue()
680 if (!q) { in nvme_add_io_queue()
685 .dptr.prp1 = cpu_to_le64(q->cq.iova), in nvme_add_io_queue()
695 .dptr.prp1 = cpu_to_le64(q->sq.iova), in nvme_add_io_queue()
704 s->queues[n] = q; in nvme_add_io_queue()
708 nvme_free_queue_pair(q); in nvme_add_io_queue()
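nvme_add_io_queue() (lines 673-708) creates the pair on the device with two admin commands, Create I/O Completion Queue before Create I/O Submission Queue (the SQ must name an existing CQ), each pointing prp1 at the ring's VFIO-mapped IOVA. A hedged sketch of how the CQ command could be assembled: the struct is a simplified stand-in for QEMU's NvmeCmd, the cdw10/cdw11 encodings follow the NVMe specification, and the cpu_to_le conversions QEMU applies are omitted:

    #include <stdint.h>

    /* Simplified stand-in for QEMU's NvmeCmd; only the fields used here. */
    struct nvme_cmd {
        uint8_t  opcode;
        uint64_t prp1;       /* IOVA of the physically contiguous ring */
        uint32_t cdw10;
        uint32_t cdw11;
    };

    #define NVME_ADM_CMD_CREATE_SQ 0x01
    #define NVME_ADM_CMD_CREATE_CQ 0x05

    /* Build Create I/O Completion Queue for queue id `qid` with `size`
     * entries.  CDW10 = (size - 1) << 16 | qid (queue size is 0-based);
     * CDW11 sets PC (bit 0, contiguous ring) and IEN (bit 1, interrupts). */
    static struct nvme_cmd make_create_cq(uint16_t qid, uint16_t size,
                                          uint64_t iova)
    {
        struct nvme_cmd cmd = {
            .opcode = NVME_ADM_CMD_CREATE_CQ,
            .prp1   = iova,
            .cdw10  = ((uint32_t)(size - 1) << 16) | qid,
            .cdw11  = 0x3,   /* PC | IEN */
        };
        return cmd;
    }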
720 NVMeQueuePair *q = s->queues[i]; in nvme_poll_cb() local
721 const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES; in nvme_poll_cb()
722 NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset]; in nvme_poll_cb()
728 if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) { in nvme_poll_cb()
747 NVMeQueuePair *q; in nvme_init() local
838 q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp); in nvme_init()
839 if (!q) { in nvme_init()
843 s->queues[INDEX_ADMIN] = q; in nvme_init()
848 regs->asq = cpu_to_le64(q->sq.iova); in nvme_init()
849 regs->acq = cpu_to_le64(q->cq.iova); in nvme_init()
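The admin queue pair is the exception: nvme_init() (lines 747-849) does not create it with commands but programs its ring addresses directly into the controller's ASQ/ACQ registers before enabling the controller. A sketch of that register block and store, with offsets per the NVMe specification; QEMU's cpu_to_le64() is replaced by plain stores on an assumed little-endian host, and the AQA sizing write is included for context even though only the asq/acq lines match q above:

    #include <stdint.h>

    /* Head of the NVMe controller register block (BAR0), per-spec offsets. */
    struct nvme_regs {
        uint64_t cap;      /* 0x00: controller capabilities */
        uint32_t vs;       /* 0x08: version */
        uint32_t intms;    /* 0x0C: interrupt mask set */
        uint32_t intmc;    /* 0x10: interrupt mask clear */
        uint32_t cc;       /* 0x14: controller configuration (CC.EN enables) */
        uint32_t rsvd;
        uint32_t csts;     /* 0x1C: controller status */
        uint32_t nssr;     /* 0x20: subsystem reset */
        uint32_t aqa;      /* 0x24: admin queue attributes (0-based sizes) */
        uint64_t asq;      /* 0x28: admin submission queue base address */
        uint64_t acq;      /* 0x30: admin completion queue base address */
    };

    /* Point the controller at the admin rings before setting CC.EN. */
    static void program_admin_queues(volatile struct nvme_regs *regs,
                                     uint64_t sq_iova, uint64_t cq_iova,
                                     uint16_t entries)
    {
        regs->aqa = ((uint32_t)(entries - 1) << 16) | (entries - 1);
        regs->asq = sq_iova;
        regs->acq = cq_iova;
    }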
1551 NVMeQueuePair *q = s->queues[i]; in nvme_detach_aio_context() local
1553 qemu_bh_delete(q->completion_bh); in nvme_detach_aio_context()
1554 q->completion_bh = NULL; in nvme_detach_aio_context()
1573 NVMeQueuePair *q = s->queues[i]; in nvme_attach_aio_context() local
1575 q->completion_bh = in nvme_attach_aio_context()
1576 aio_bh_new(new_context, nvme_process_completion_bh, q); in nvme_attach_aio_context()