xref: /openbmc/qemu/block/nvme.c (revision d36f7de8)
1 /*
2  * NVMe block driver based on vfio
3  *
4  * Copyright 2016 - 2018 Red Hat, Inc.
5  *
6  * Authors:
7  *   Fam Zheng <famz@redhat.com>
8  *   Paolo Bonzini <pbonzini@redhat.com>
9  *
10  * This work is licensed under the terms of the GNU GPL, version 2 or later.
11  * See the COPYING file in the top-level directory.
12  */
13 
14 #include "qemu/osdep.h"
15 #include <linux/vfio.h>
16 #include "qapi/error.h"
17 #include "qapi/qmp/qdict.h"
18 #include "qapi/qmp/qstring.h"
19 #include "qemu/error-report.h"
20 #include "qemu/cutils.h"
21 #include "qemu/option.h"
22 #include "qemu/vfio-helpers.h"
23 #include "block/block_int.h"
24 #include "trace.h"
25 
26 #include "block/nvme.h"
27 
28 #define NVME_SQ_ENTRY_BYTES 64
29 #define NVME_CQ_ENTRY_BYTES 16
30 #define NVME_QUEUE_SIZE 128
31 #define NVME_BAR_SIZE 8192
32 
33 typedef struct {
34     int32_t  head, tail;
35     uint8_t  *queue;
36     uint64_t iova;
37     /* Hardware MMIO register */
38     volatile uint32_t *doorbell;
39 } NVMeQueue;
40 
41 typedef struct {
42     BlockCompletionFunc *cb;
43     void *opaque;
44     int cid;
45     void *prp_list_page;
46     uint64_t prp_list_iova;
47     bool busy;
48 } NVMeRequest;
49 
50 typedef struct {
51     CoQueue     free_req_queue;
52     QemuMutex   lock;
53 
54     /* Fields protected by BQL */
55     int         index;
56     uint8_t     *prp_list_pages;
57 
58     /* Fields protected by @lock */
59     NVMeQueue   sq, cq;
60     int         cq_phase;
61     NVMeRequest reqs[NVME_QUEUE_SIZE];
62     bool        busy;
63     int         need_kick;
64     int         inflight;
65 } NVMeQueuePair;
66 
67 /* Memory mapped registers */
68 typedef volatile struct {
69     uint64_t cap;
70     uint32_t vs;
71     uint32_t intms;
72     uint32_t intmc;
73     uint32_t cc;
74     uint32_t reserved0;
75     uint32_t csts;
76     uint32_t nssr;
77     uint32_t aqa;
78     uint64_t asq;
79     uint64_t acq;
80     uint32_t cmbloc;
81     uint32_t cmbsz;
82     uint8_t  reserved1[0xec0];
83     uint8_t  cmd_set_specific[0x100];
84     uint32_t doorbells[];
85 } QEMU_PACKED NVMeRegs;
86 
87 QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
88 
89 typedef struct {
90     AioContext *aio_context;
91     QEMUVFIOState *vfio;
92     NVMeRegs *regs;
93     /* The submission/completion queue pairs.
94      * [0]: admin queue.
95      * [1..]: io queues.
96      */
97     NVMeQueuePair **queues;
98     int nr_queues;
99     size_t page_size;
100     /* Width of a single doorbell entry, in uint32_t units (stride / 4). */
101     size_t doorbell_scale;
102     bool write_cache_supported;
103     EventNotifier irq_notifier;
104     uint64_t nsze; /* Namespace size reported by identify command */
105     int nsid;      /* The namespace id to read/write data. */
106     uint64_t max_transfer;
107     bool plugged;
108 
109     CoMutex dma_map_lock;
110     CoQueue dma_flush_queue;
111 
112     /* Total size of mapped qiov, accessed under dma_map_lock */
113     int dma_map_count;
114 } BDRVNVMeState;
115 
116 #define NVME_BLOCK_OPT_DEVICE "device"
117 #define NVME_BLOCK_OPT_NAMESPACE "namespace"
118 
119 static QemuOptsList runtime_opts = {
120     .name = "nvme",
121     .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
122     .desc = {
123         {
124             .name = NVME_BLOCK_OPT_DEVICE,
125             .type = QEMU_OPT_STRING,
126             .help = "NVMe PCI device address",
127         },
128         {
129             .name = NVME_BLOCK_OPT_NAMESPACE,
130             .type = QEMU_OPT_NUMBER,
131             .help = "NVMe namespace",
132         },
133         { /* end of list */ }
134     },
135 };
136 
137 static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
138                             int nentries, int entry_bytes, Error **errp)
139 {
140     BDRVNVMeState *s = bs->opaque;
141     size_t bytes;
142     int r;
143 
144     bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
145     q->head = q->tail = 0;
146     q->queue = qemu_try_blockalign0(bs, bytes);
147 
148     if (!q->queue) {
149         error_setg(errp, "Cannot allocate queue");
150         return;
151     }
152     r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
153     if (r) {
154         error_setg(errp, "Cannot map queue");
155     }
156 }
157 
158 static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
159 {
160     qemu_vfree(q->prp_list_pages);
161     qemu_vfree(q->sq.queue);
162     qemu_vfree(q->cq.queue);
163     qemu_mutex_destroy(&q->lock);
164     g_free(q);
165 }
166 
167 static void nvme_free_req_queue_cb(void *opaque)
168 {
169     NVMeQueuePair *q = opaque;
170 
171     qemu_mutex_lock(&q->lock);
172     while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
173         /* Retry all pending requests */
174     }
175     qemu_mutex_unlock(&q->lock);
176 }
177 
178 static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
179                                              int idx, int size,
180                                              Error **errp)
181 {
182     int i, r;
183     BDRVNVMeState *s = bs->opaque;
184     Error *local_err = NULL;
185     NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
186     uint64_t prp_list_iova;
187 
188     qemu_mutex_init(&q->lock);
189     q->index = idx;
190     qemu_co_queue_init(&q->free_req_queue);
191     q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_QUEUE_SIZE);
192     r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
193                           s->page_size * NVME_QUEUE_SIZE,
194                           false, &prp_list_iova);
195     if (r) {
196         goto fail;
197     }
198     for (i = 0; i < NVME_QUEUE_SIZE; i++) {
199         NVMeRequest *req = &q->reqs[i];
200         req->cid = i + 1;
201         req->prp_list_page = q->prp_list_pages + i * s->page_size;
202         req->prp_list_iova = prp_list_iova + i * s->page_size;
203     }
204     nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
205     if (local_err) {
206         error_propagate(errp, local_err);
207         goto fail;
208     }
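        /* Doorbell layout: the SQ tail doorbell for queue n lives at slot 2n and
         * the CQ head doorbell at slot 2n + 1, each slot doorbell_scale
         * uint32_t entries wide. */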
209     q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];
210 
211     nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
212     if (local_err) {
213         error_propagate(errp, local_err);
214         goto fail;
215     }
216     q->cq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale + 1];
217 
218     return q;
219 fail:
220     nvme_free_queue_pair(bs, q);
221     return NULL;
222 }
223 
224 /* With q->lock */
225 static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
226 {
227     if (s->plugged || !q->need_kick) {
228         return;
229     }
230     trace_nvme_kick(s, q->index);
231     assert(!(q->sq.tail & 0xFF00));
232     /* Fence the write to submission queue entry before notifying the device. */
233     smp_wmb();
234     *q->sq.doorbell = cpu_to_le32(q->sq.tail);
235     q->inflight += q->need_kick;
236     q->need_kick = 0;
237 }
238 
239 /* Find a free request element if any, otherwise:
240  * a) if in coroutine context, try to wait for one to become available;
241  * b) if not in coroutine, return NULL;
242  */
243 static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
244 {
245     int i;
246     NVMeRequest *req = NULL;
247 
248     qemu_mutex_lock(&q->lock);
249     while (q->inflight + q->need_kick > NVME_QUEUE_SIZE - 2) {
250         /* We have to leave one slot empty as that is the full queue case (head
251          * == tail + 1). */
252         if (qemu_in_coroutine()) {
253             trace_nvme_free_req_queue_wait(q);
254             qemu_co_queue_wait(&q->free_req_queue, &q->lock);
255         } else {
256             qemu_mutex_unlock(&q->lock);
257             return NULL;
258         }
259     }
260     for (i = 0; i < NVME_QUEUE_SIZE; i++) {
261         if (!q->reqs[i].busy) {
262             q->reqs[i].busy = true;
263             req = &q->reqs[i];
264             break;
265         }
266     }
267     /* We have checked inflight and need_kick while holding q->lock, so one
268      * free req must be available. */
269     assert(req);
270     qemu_mutex_unlock(&q->lock);
271     return req;
272 }
273 
274 static inline int nvme_translate_error(const NvmeCqe *c)
275 {
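        /* Bit 0 of the completion status is the phase tag; the Status Code (SC)
         * occupies the next 8 bits. */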
276     uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
277     if (status) {
278         trace_nvme_error(le32_to_cpu(c->result),
279                          le16_to_cpu(c->sq_head),
280                          le16_to_cpu(c->sq_id),
281                          le16_to_cpu(c->cid),
282                          status);
283     }
284     switch (status) {
285     case 0:
286         return 0;
287     case 1:
288         return -ENOSYS;
289     case 2:
290         return -EINVAL;
291     default:
292         return -EIO;
293     }
294 }
295 
296 /* With q->lock */
297 static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
298 {
299     bool progress = false;
300     NVMeRequest *preq;
301     NVMeRequest req;
302     NvmeCqe *c;
303 
304     trace_nvme_process_completion(s, q->index, q->inflight);
305     if (q->busy || s->plugged) {
306         trace_nvme_process_completion_queue_busy(s, q->index);
307         return false;
308     }
309     q->busy = true;
310     assert(q->inflight >= 0);
311     while (q->inflight) {
312         uint16_t cid;
313         c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
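            /* A completion is new only if its phase tag differs from q->cq_phase
             * and its cid is non-zero (the cid is cleared once consumed below). */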
314         if (!c->cid || (le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
315             break;
316         }
317         q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
318         if (!q->cq.head) {
319             q->cq_phase = !q->cq_phase;
320         }
321         cid = le16_to_cpu(c->cid);
322         if (cid == 0 || cid > NVME_QUEUE_SIZE) {
323             fprintf(stderr, "Unexpected CID in completion queue: %" PRIu16 "\n",
324                     cid);
325             continue;
326         }
327         assert(cid <= NVME_QUEUE_SIZE);
328         trace_nvme_complete_command(s, q->index, cid);
329         preq = &q->reqs[cid - 1];
330         req = *preq;
331         assert(req.cid == cid);
332         assert(req.cb);
333         preq->busy = false;
334         preq->cb = preq->opaque = NULL;
335         qemu_mutex_unlock(&q->lock);
336         req.cb(req.opaque, nvme_translate_error(c));
337         qemu_mutex_lock(&q->lock);
338         c->cid = cpu_to_le16(0);
339         q->inflight--;
340         /* Flip Phase Tag bit. */
341         c->status = cpu_to_le16(le16_to_cpu(c->status) ^ 0x1);
342         progress = true;
343     }
344     if (progress) {
345         /* Notify the device so it can post more completions. */
346         smp_mb_release();
347         *q->cq.doorbell = cpu_to_le32(q->cq.head);
348         if (!qemu_co_queue_empty(&q->free_req_queue)) {
349             aio_bh_schedule_oneshot(s->aio_context, nvme_free_req_queue_cb, q);
350         }
351     }
352     q->busy = false;
353     return progress;
354 }
355 
356 static void nvme_trace_command(const NvmeCmd *cmd)
357 {
358     int i;
359 
360     for (i = 0; i < 8; ++i) {
361         uint8_t *cmdp = (uint8_t *)cmd + i * 8;
362         trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
363                                       cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
364     }
365 }
366 
367 static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
368                                 NVMeRequest *req,
369                                 NvmeCmd *cmd, BlockCompletionFunc cb,
370                                 void *opaque)
371 {
372     assert(!req->cb);
373     req->cb = cb;
374     req->opaque = opaque;
375     cmd->cid = cpu_to_le16(req->cid);
376 
377     trace_nvme_submit_command(s, q->index, req->cid);
378     nvme_trace_command(cmd);
379     qemu_mutex_lock(&q->lock);
380     memcpy((uint8_t *)q->sq.queue +
381            q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
382     q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
383     q->need_kick++;
384     nvme_kick(s, q);
385     nvme_process_completion(s, q);
386     qemu_mutex_unlock(&q->lock);
387 }
388 
389 static void nvme_cmd_sync_cb(void *opaque, int ret)
390 {
391     int *pret = opaque;
392     *pret = ret;
393 }
394 
395 static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
396                          NvmeCmd *cmd)
397 {
398     NVMeRequest *req;
399     BDRVNVMeState *s = bs->opaque;
400     int ret = -EINPROGRESS;
401     req = nvme_get_free_req(q);
402     if (!req) {
403         return -EBUSY;
404     }
405     nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);
406 
407     BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
408     return ret;
409 }
410 
411 static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
412 {
413     BDRVNVMeState *s = bs->opaque;
414     NvmeIdCtrl *idctrl;
415     NvmeIdNs *idns;
416     uint8_t *resp;
417     int r;
418     uint64_t iova;
419     NvmeCmd cmd = {
420         .opcode = NVME_ADM_CMD_IDENTIFY,
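            /* CNS 0x1: Identify Controller data structure */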
421         .cdw10 = cpu_to_le32(0x1),
422     };
423 
424     resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
425     if (!resp) {
426         error_setg(errp, "Cannot allocate buffer for identify response");
427         goto out;
428     }
429     idctrl = (NvmeIdCtrl *)resp;
430     idns = (NvmeIdNs *)resp;
431     r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
432     if (r) {
433         error_setg(errp, "Cannot map buffer for DMA");
434         goto out;
435     }
436     cmd.prp1 = cpu_to_le64(iova);
437 
438     if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
439         error_setg(errp, "Failed to identify controller");
440         goto out;
441     }
442 
443     if (le32_to_cpu(idctrl->nn) < namespace) {
444         error_setg(errp, "Invalid namespace");
445         goto out;
446     }
447     s->write_cache_supported = idctrl->vwc & 0x1;
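        /* MDTS is a power of two in units of the minimum page size; 0 means no limit. */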
448     s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
449     /* For now the page list buffer per command is one page, to hold at most
450      * s->page_size / sizeof(uint64_t) entries. */
451     s->max_transfer = MIN_NON_ZERO(s->max_transfer,
452                           s->page_size / sizeof(uint64_t) * s->page_size);
453 
454     memset(resp, 0, 4096);
455 
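        /* CNS 0x0: Identify Namespace data structure for the NSID set below */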
456     cmd.cdw10 = 0;
457     cmd.nsid = cpu_to_le32(namespace);
458     if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
459         error_setg(errp, "Failed to identify namespace");
460         goto out;
461     }
462 
463     s->nsze = le64_to_cpu(idns->nsze);
464 
465 out:
466     qemu_vfio_dma_unmap(s->vfio, resp);
467     qemu_vfree(resp);
468 }
469 
470 static bool nvme_poll_queues(BDRVNVMeState *s)
471 {
472     bool progress = false;
473     int i;
474 
475     for (i = 0; i < s->nr_queues; i++) {
476         NVMeQueuePair *q = s->queues[i];
477         qemu_mutex_lock(&q->lock);
478         while (nvme_process_completion(s, q)) {
479             /* Keep polling */
480             progress = true;
481         }
482         qemu_mutex_unlock(&q->lock);
483     }
484     return progress;
485 }
486 
487 static void nvme_handle_event(EventNotifier *n)
488 {
489     BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
490 
491     trace_nvme_handle_event(s);
492     aio_context_acquire(s->aio_context);
493     event_notifier_test_and_clear(n);
494     nvme_poll_queues(s);
495     aio_context_release(s->aio_context);
496 }
497 
498 static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
499 {
500     BDRVNVMeState *s = bs->opaque;
501     int n = s->nr_queues;
502     NVMeQueuePair *q;
503     NvmeCmd cmd;
504     int queue_size = NVME_QUEUE_SIZE;
505 
506     q = nvme_create_queue_pair(bs, n, queue_size, errp);
507     if (!q) {
508         return false;
509     }
510     cmd = (NvmeCmd) {
511         .opcode = NVME_ADM_CMD_CREATE_CQ,
512         .prp1 = cpu_to_le64(q->cq.iova),
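            /* cdw10: 0's based queue size in bits 31:16, queue ID in bits 15:0;
             * cdw11 = 0x3: physically contiguous, interrupts enabled */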
513         .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
514         .cdw11 = cpu_to_le32(0x3),
515     };
516     if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
517         error_setg(errp, "Failed to create io queue [%d]", n);
518         nvme_free_queue_pair(bs, q);
519         return false;
520     }
521     cmd = (NvmeCmd) {
522         .opcode = NVME_ADM_CMD_CREATE_SQ,
523         .prp1 = cpu_to_le64(q->sq.iova),
524         .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
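            /* cdw11: bit 0 = physically contiguous, bits 31:16 = ID of the
             * completion queue to pair with */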
525         .cdw11 = cpu_to_le32(0x1 | (n << 16)),
526     };
527     if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
528         error_setg(errp, "Failed to create io queue [%d]", n);
529         nvme_free_queue_pair(bs, q);
530         return false;
531     }
532     s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
533     s->queues[n] = q;
534     s->nr_queues++;
535     return true;
536 }
537 
538 static bool nvme_poll_cb(void *opaque)
539 {
540     EventNotifier *e = opaque;
541     BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
542     bool progress = false;
543 
544     trace_nvme_poll_cb(s);
545     progress = nvme_poll_queues(s);
546     return progress;
547 }
548 
549 static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
550                      Error **errp)
551 {
552     BDRVNVMeState *s = bs->opaque;
553     int ret;
554     uint64_t cap;
555     uint64_t timeout_ms;
556     uint64_t deadline, now;
557     Error *local_err = NULL;
558 
559     qemu_co_mutex_init(&s->dma_map_lock);
560     qemu_co_queue_init(&s->dma_flush_queue);
561     s->nsid = namespace;
562     s->aio_context = bdrv_get_aio_context(bs);
563     ret = event_notifier_init(&s->irq_notifier, 0);
564     if (ret) {
565         error_setg(errp, "Failed to init event notifier");
566         return ret;
567     }
568 
569     s->vfio = qemu_vfio_open_pci(device, errp);
570     if (!s->vfio) {
571         ret = -EINVAL;
572         goto out;
573     }
574 
575     s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
576     if (!s->regs) {
577         ret = -EINVAL;
578         goto out;
579     }
580 
581     /* Perform the initialization sequence described in NVMe spec section
582      * 7.6.1 "Initialization". */
583 
584     cap = le64_to_cpu(s->regs->cap);
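        /* CAP.CSS bit 37 indicates support for the NVM command set. */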
585     if (!(cap & (1ULL << 37))) {
586         error_setg(errp, "Device doesn't support NVMe command set");
587         ret = -EINVAL;
588         goto out;
589     }
590 
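        /* CAP.MPSMIN (bits 51:48) encodes the minimum page size as 2^(12 + MPSMIN);
         * CAP.DSTRD (bits 35:32) encodes the doorbell stride as 2^(2 + DSTRD) bytes. */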
591     s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
592     s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
593     bs->bl.opt_mem_alignment = s->page_size;
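        /* CAP.TO (bits 31:24) is the worst-case enable/disable time in 500 ms
         * units; cap the wait at 30 seconds. */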
594     timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);
595 
596     /* Reset device to get a clean state. */
597     s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
598     /* Wait for CSTS.RDY = 0. */
599     deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
600     while (le32_to_cpu(s->regs->csts) & 0x1) {
601         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
602             error_setg(errp, "Timeout while waiting for device to reset (%"
603                              PRIu64 " ms)",
604                        timeout_ms);
605             ret = -ETIMEDOUT;
606             goto out;
607         }
608     }
609 
610     /* Set up admin queue. */
611     s->queues = g_new(NVMeQueuePair *, 1);
612     s->nr_queues = 1;
613     s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
614     if (!s->queues[0]) {
615         ret = -EINVAL;
616         goto out;
617     }
618     QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
619     s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
620     s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
621     s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);
622 
623     /* After setting up all control registers we can enable the device. */
624     s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
625                               (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
626                               0x1);
627     /* Wait for CSTS.RDY = 1. */
628     now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
629     deadline = now + timeout_ms * 1000000;
630     while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
631         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
632             error_setg(errp, "Timeout while waiting for device to start (%"
633                              PRIu64 " ms)",
634                        timeout_ms);
635             ret = -ETIMEDOUT;
636             goto out;
637         }
638     }
639 
640     ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
641                                  VFIO_PCI_MSIX_IRQ_INDEX, errp);
642     if (ret) {
643         goto out;
644     }
645     aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
646                            false, nvme_handle_event, nvme_poll_cb);
647 
648     nvme_identify(bs, namespace, &local_err);
649     if (local_err) {
650         error_propagate(errp, local_err);
651         ret = -EIO;
652         goto out;
653     }
654 
655     /* Set up command queues. */
656     if (!nvme_add_io_queue(bs, errp)) {
657         ret = -EIO;
658     }
659 out:
660     /* Cleaning up is done in nvme_file_open() upon error. */
661     return ret;
662 }
663 
664 /* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
665  *
666  *     nvme://0000:44:00.0/1
667  *
668  * where "nvme://" is the fixed protocol prefix, the middle part is the PCI
669  * address, and the last part is the namespace number, which starts from 1
670  * according to the NVMe spec. */
671 static void nvme_parse_filename(const char *filename, QDict *options,
672                                 Error **errp)
673 {
674     int pref = strlen("nvme://");
675 
676     if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
677         const char *tmp = filename + pref;
678         char *device;
679         const char *namespace;
680         unsigned long ns;
681         const char *slash = strchr(tmp, '/');
682         if (!slash) {
683             qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
684             return;
685         }
686         device = g_strndup(tmp, slash - tmp);
687         qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
688         g_free(device);
689         namespace = slash + 1;
690         if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
691             error_setg(errp, "Invalid namespace '%s', positive number expected",
692                        namespace);
693             return;
694         }
695         qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
696                       *namespace ? namespace : "1");
697     }
698 }
699 
700 static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
701                                            Error **errp)
702 {
703     int ret;
704     BDRVNVMeState *s = bs->opaque;
705     NvmeCmd cmd = {
706         .opcode = NVME_ADM_CMD_SET_FEATURES,
707         .nsid = cpu_to_le32(s->nsid),
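            /* Feature 0x06 is Volatile Write Cache; cdw11 bit 0 enables or disables it. */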
708         .cdw10 = cpu_to_le32(0x06),
709         .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
710     };
711 
712     ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
713     if (ret) {
714         error_setg(errp, "Failed to configure NVMe write cache");
715     }
716     return ret;
717 }
718 
719 static void nvme_close(BlockDriverState *bs)
720 {
721     int i;
722     BDRVNVMeState *s = bs->opaque;
723 
724     for (i = 0; i < s->nr_queues; ++i) {
725         nvme_free_queue_pair(bs, s->queues[i]);
726     }
727     g_free(s->queues);
728     aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
729                            false, NULL, NULL);
730     event_notifier_cleanup(&s->irq_notifier);
731     qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
732     qemu_vfio_close(s->vfio);
733 }
734 
735 static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
736                           Error **errp)
737 {
738     const char *device;
739     QemuOpts *opts;
740     int namespace;
741     int ret;
742     BDRVNVMeState *s = bs->opaque;
743 
744     opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
745     qemu_opts_absorb_qdict(opts, options, &error_abort);
746     device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
747     if (!device) {
748         error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
749         qemu_opts_del(opts);
750         return -EINVAL;
751     }
752 
753     namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
754     ret = nvme_init(bs, device, namespace, errp);
755     qemu_opts_del(opts);
756     if (ret) {
757         goto fail;
758     }
759     if (flags & BDRV_O_NOCACHE) {
760         if (!s->write_cache_supported) {
761             error_setg(errp,
762                        "NVMe controller doesn't support write cache configuration");
763             ret = -EINVAL;
764         } else {
765             ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
766                                                   errp);
767         }
768         if (ret) {
769             goto fail;
770         }
771     }
772     bs->supported_write_flags = BDRV_REQ_FUA;
773     return 0;
774 fail:
775     nvme_close(bs);
776     return ret;
777 }
778 
779 static int64_t nvme_getlength(BlockDriverState *bs)
780 {
781     BDRVNVMeState *s = bs->opaque;
782 
783     return s->nsze << BDRV_SECTOR_BITS;
784 }
785 
786 /* Called with s->dma_map_lock */
787 static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
788                                             QEMUIOVector *qiov)
789 {
790     int r = 0;
791     BDRVNVMeState *s = bs->opaque;
792 
793     s->dma_map_count -= qiov->size;
794     if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
795         r = qemu_vfio_dma_reset_temporary(s->vfio);
796         if (!r) {
797             qemu_co_queue_restart_all(&s->dma_flush_queue);
798         }
799     }
800     return r;
801 }
802 
803 /* Called with s->dma_map_lock */
804 static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
805                                           NVMeRequest *req, QEMUIOVector *qiov)
806 {
807     BDRVNVMeState *s = bs->opaque;
808     uint64_t *pagelist = req->prp_list_page;
809     int i, j, r;
810     int entries = 0;
811 
812     assert(qiov->size);
813     assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
814     assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
815     for (i = 0; i < qiov->niov; ++i) {
816         bool retry = true;
817         uint64_t iova;
818 try_map:
819         r = qemu_vfio_dma_map(s->vfio,
820                               qiov->iov[i].iov_base,
821                               qiov->iov[i].iov_len,
822                               true, &iova);
823         if (r == -ENOMEM && retry) {
824             retry = false;
825             trace_nvme_dma_flush_queue_wait(s);
826             if (s->dma_map_count) {
827                 trace_nvme_dma_map_flush(s);
828                 qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
829             } else {
830                 r = qemu_vfio_dma_reset_temporary(s->vfio);
831                 if (r) {
832                     goto fail;
833                 }
834             }
835             goto try_map;
836         }
837         if (r) {
838             goto fail;
839         }
840 
841         for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
842             pagelist[entries++] = iova + j * s->page_size;
843         }
844         trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
845                                     qiov->iov[i].iov_len / s->page_size);
846     }
847 
848     s->dma_map_count += qiov->size;
849 
850     assert(entries <= s->page_size / sizeof(uint64_t));
851     switch (entries) {
852     case 0:
853         abort();
854     case 1:
855         cmd->prp1 = cpu_to_le64(pagelist[0]);
856         cmd->prp2 = 0;
857         break;
858     case 2:
859         cmd->prp1 = cpu_to_le64(pagelist[0]);
860         cmd->prp2 = cpu_to_le64(pagelist[1]);
861         break;
862     default:
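            /* More than two pages: PRP1 holds the first page and PRP2 points to a
             * PRP list with the remaining entries. */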
863         cmd->prp1 = cpu_to_le64(pagelist[0]);
864         cmd->prp2 = cpu_to_le64(req->prp_list_iova);
865         for (i = 0; i < entries - 1; ++i) {
866             pagelist[i] = cpu_to_le64(pagelist[i + 1]);
867         }
868         pagelist[entries - 1] = 0;
869         break;
870     }
871     trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
872     for (i = 0; i < entries; ++i) {
873         trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
874     }
875     return 0;
876 fail:
877     /* No need to unmap [0 - i) iovs even if we've failed, since we don't
878      * increment s->dma_map_count. This is okay for fixed mapping memory areas
879      * because they are already mapped before calling this function; for
880      * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
881      * calling qemu_vfio_dma_reset_temporary when necessary. */
882     return r;
883 }
884 
885 typedef struct {
886     Coroutine *co;
887     int ret;
888     AioContext *ctx;
889 } NVMeCoData;
890 
891 static void nvme_rw_cb_bh(void *opaque)
892 {
893     NVMeCoData *data = opaque;
894     qemu_coroutine_enter(data->co);
895 }
896 
897 static void nvme_rw_cb(void *opaque, int ret)
898 {
899     NVMeCoData *data = opaque;
900     data->ret = ret;
901     if (!data->co) {
902         /* The rw coroutine hasn't yielded, don't try to enter. */
903         return;
904     }
905     aio_bh_schedule_oneshot(data->ctx, nvme_rw_cb_bh, data);
906 }
907 
908 static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
909                                             uint64_t offset, uint64_t bytes,
910                                             QEMUIOVector *qiov,
911                                             bool is_write,
912                                             int flags)
913 {
914     int r;
915     BDRVNVMeState *s = bs->opaque;
916     NVMeQueuePair *ioq = s->queues[1];
917     NVMeRequest *req;
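        /* cdw10/cdw11 carry the starting LBA (low/high 32 bits); cdw12 bits 15:0
         * are the 0's based number of logical blocks and bit 30 is Force Unit
         * Access. */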
918     uint32_t cdw12 = (((bytes >> BDRV_SECTOR_BITS) - 1) & 0xFFFF) |
919                        (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
920     NvmeCmd cmd = {
921         .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
922         .nsid = cpu_to_le32(s->nsid),
923         .cdw10 = cpu_to_le32((offset >> BDRV_SECTOR_BITS) & 0xFFFFFFFF),
924         .cdw11 = cpu_to_le32(((offset >> BDRV_SECTOR_BITS) >> 32) & 0xFFFFFFFF),
925         .cdw12 = cpu_to_le32(cdw12),
926     };
927     NVMeCoData data = {
928         .ctx = bdrv_get_aio_context(bs),
929         .ret = -EINPROGRESS,
930     };
931 
932     trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
933     assert(s->nr_queues > 1);
934     req = nvme_get_free_req(ioq);
935     assert(req);
936 
937     qemu_co_mutex_lock(&s->dma_map_lock);
938     r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
939     qemu_co_mutex_unlock(&s->dma_map_lock);
940     if (r) {
941         req->busy = false;
942         return r;
943     }
944     nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
945 
946     data.co = qemu_coroutine_self();
947     while (data.ret == -EINPROGRESS) {
948         qemu_coroutine_yield();
949     }
950 
951     qemu_co_mutex_lock(&s->dma_map_lock);
952     r = nvme_cmd_unmap_qiov(bs, qiov);
953     qemu_co_mutex_unlock(&s->dma_map_lock);
954     if (r) {
955         return r;
956     }
957 
958     trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
959     return data.ret;
960 }
961 
962 static inline bool nvme_qiov_aligned(BlockDriverState *bs,
963                                      const QEMUIOVector *qiov)
964 {
965     int i;
966     BDRVNVMeState *s = bs->opaque;
967 
968     for (i = 0; i < qiov->niov; ++i) {
969         if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
970             !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
971             trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
972                                       qiov->iov[i].iov_len, s->page_size);
973             return false;
974         }
975     }
976     return true;
977 }
978 
979 static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
980                        QEMUIOVector *qiov, bool is_write, int flags)
981 {
982     BDRVNVMeState *s = bs->opaque;
983     int r;
984     uint8_t *buf = NULL;
985     QEMUIOVector local_qiov;
986 
987     assert(QEMU_IS_ALIGNED(offset, s->page_size));
988     assert(QEMU_IS_ALIGNED(bytes, s->page_size));
989     assert(bytes <= s->max_transfer);
990     if (nvme_qiov_aligned(bs, qiov)) {
991         return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
992     }
993     trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
994     buf = qemu_try_blockalign(bs, bytes);
995 
996     if (!buf) {
997         return -ENOMEM;
998     }
999     qemu_iovec_init(&local_qiov, 1);
1000     if (is_write) {
1001         qemu_iovec_to_buf(qiov, 0, buf, bytes);
1002     }
1003     qemu_iovec_add(&local_qiov, buf, bytes);
1004     r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
1005     qemu_iovec_destroy(&local_qiov);
1006     if (!r && !is_write) {
1007         qemu_iovec_from_buf(qiov, 0, buf, bytes);
1008     }
1009     qemu_vfree(buf);
1010     return r;
1011 }
1012 
1013 static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
1014                                        uint64_t offset, uint64_t bytes,
1015                                        QEMUIOVector *qiov, int flags)
1016 {
1017     return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
1018 }
1019 
1020 static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
1021                                         uint64_t offset, uint64_t bytes,
1022                                         QEMUIOVector *qiov, int flags)
1023 {
1024     return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
1025 }
1026 
1027 static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
1028 {
1029     BDRVNVMeState *s = bs->opaque;
1030     NVMeQueuePair *ioq = s->queues[1];
1031     NVMeRequest *req;
1032     NvmeCmd cmd = {
1033         .opcode = NVME_CMD_FLUSH,
1034         .nsid = cpu_to_le32(s->nsid),
1035     };
1036     NVMeCoData data = {
1037         .ctx = bdrv_get_aio_context(bs),
1038         .ret = -EINPROGRESS,
1039     };
1040 
1041     assert(s->nr_queues > 1);
1042     req = nvme_get_free_req(ioq);
1043     assert(req);
1044     nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
1045 
1046     data.co = qemu_coroutine_self();
1047     if (data.ret == -EINPROGRESS) {
1048         qemu_coroutine_yield();
1049     }
1050 
1051     return data.ret;
1052 }
1053 
1054 
1055 static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
1056                                BlockReopenQueue *queue, Error **errp)
1057 {
1058     return 0;
1059 }
1060 
1061 static void nvme_refresh_filename(BlockDriverState *bs, QDict *opts)
1062 {
1063     qdict_del(opts, "filename");
1064 
1065     if (!qdict_size(opts)) {
1066         snprintf(bs->exact_filename, sizeof(bs->exact_filename), "%s://",
1067                  bs->drv->format_name);
1068     }
1069 
1070     qdict_put_str(opts, "driver", bs->drv->format_name);
1071     bs->full_open_options = qobject_ref(opts);
1072 }
1073 
1074 static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
1075 {
1076     BDRVNVMeState *s = bs->opaque;
1077 
1078     bs->bl.opt_mem_alignment = s->page_size;
1079     bs->bl.request_alignment = s->page_size;
1080     bs->bl.max_transfer = s->max_transfer;
1081 }
1082 
1083 static void nvme_detach_aio_context(BlockDriverState *bs)
1084 {
1085     BDRVNVMeState *s = bs->opaque;
1086 
1087     aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
1088                            false, NULL, NULL);
1089 }
1090 
1091 static void nvme_attach_aio_context(BlockDriverState *bs,
1092                                     AioContext *new_context)
1093 {
1094     BDRVNVMeState *s = bs->opaque;
1095 
1096     s->aio_context = new_context;
1097     aio_set_event_notifier(new_context, &s->irq_notifier,
1098                            false, nvme_handle_event, nvme_poll_cb);
1099 }
1100 
1101 static void nvme_aio_plug(BlockDriverState *bs)
1102 {
1103     BDRVNVMeState *s = bs->opaque;
1104     assert(!s->plugged);
1105     s->plugged = true;
1106 }
1107 
1108 static void nvme_aio_unplug(BlockDriverState *bs)
1109 {
1110     int i;
1111     BDRVNVMeState *s = bs->opaque;
1112     assert(s->plugged);
1113     s->plugged = false;
1114     for (i = 1; i < s->nr_queues; i++) {
1115         NVMeQueuePair *q = s->queues[i];
1116         qemu_mutex_lock(&q->lock);
1117         nvme_kick(s, q);
1118         nvme_process_completion(s, q);
1119         qemu_mutex_unlock(&q->lock);
1120     }
1121 }
1122 
1123 static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
1124 {
1125     int ret;
1126     BDRVNVMeState *s = bs->opaque;
1127 
1128     ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
1129     if (ret) {
1130         /* FIXME: we may run out of IOVA addresses after repeated
1131          * bdrv_register_buf/bdrv_unregister_buf, because qemu_vfio_dma_unmap
1132          * doesn't reclaim addresses for fixed mappings. */
1133         error_report("nvme_register_buf failed: %s", strerror(-ret));
1134     }
1135 }
1136 
1137 static void nvme_unregister_buf(BlockDriverState *bs, void *host)
1138 {
1139     BDRVNVMeState *s = bs->opaque;
1140 
1141     qemu_vfio_dma_unmap(s->vfio, host);
1142 }
1143 
1144 static BlockDriver bdrv_nvme = {
1145     .format_name              = "nvme",
1146     .protocol_name            = "nvme",
1147     .instance_size            = sizeof(BDRVNVMeState),
1148 
1149     .bdrv_parse_filename      = nvme_parse_filename,
1150     .bdrv_file_open           = nvme_file_open,
1151     .bdrv_close               = nvme_close,
1152     .bdrv_getlength           = nvme_getlength,
1153 
1154     .bdrv_co_preadv           = nvme_co_preadv,
1155     .bdrv_co_pwritev          = nvme_co_pwritev,
1156     .bdrv_co_flush_to_disk    = nvme_co_flush,
1157     .bdrv_reopen_prepare      = nvme_reopen_prepare,
1158 
1159     .bdrv_refresh_filename    = nvme_refresh_filename,
1160     .bdrv_refresh_limits      = nvme_refresh_limits,
1161 
1162     .bdrv_detach_aio_context  = nvme_detach_aio_context,
1163     .bdrv_attach_aio_context  = nvme_attach_aio_context,
1164 
1165     .bdrv_io_plug             = nvme_aio_plug,
1166     .bdrv_io_unplug           = nvme_aio_unplug,
1167 
1168     .bdrv_register_buf        = nvme_register_buf,
1169     .bdrv_unregister_buf      = nvme_unregister_buf,
1170 };
1171 
1172 static void bdrv_nvme_init(void)
1173 {
1174     bdrv_register(&bdrv_nvme);
1175 }
1176 
1177 block_init(bdrv_nvme_init);
1178