/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "qemu/vfio-helpers.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    void *bar0_wo_map;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    unsigned queue_count;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

/* Returns true on success, false on failure. */
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            unsigned nentries, size_t entry_bytes, Error **errp)
{
    ERRP_GUARD();
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size());
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return false;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map queue: ");
    }
    return r == 0;
}

static void nvme_free_queue(NVMeQueue *q)
{
    qemu_vfree(q->queue);
}

static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    nvme_free_queue(&q->sq);
    nvme_free_queue(&q->cq);
    qemu_vfree(q->prp_list_pages);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (q->free_req_head != -1 &&
           qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry waiting requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             unsigned idx, size_t size,
                                             Error **errp)
{
    ERRP_GUARD();
    int i, r;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;
    size_t bytes;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        error_setg(errp, "Cannot allocate queue pair");
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
                          qemu_real_host_page_size());
    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->prp_list_pages) {
        error_setg(errp, "Cannot allocate PRP page list");
        goto fail;
    }
    memset(q->prp_list_pages, 0, bytes);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                          false, &prp_list_iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}

/* With q->lock */
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

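    /*
     * q->need_kick counts the SQEs queued since the last doorbell write;
     * the single tail-doorbell write below publishes all of them to the
     * controller at once.
     */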
    if (!q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to the submission queue entry before notifying the
     * device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

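/*
 * Free requests form a singly linked list threaded through reqs[] by array
 * index: free_req_head is the index of the first free element (-1 when the
 * list is empty) and each element's free_req_next points at the next one.
 * Called with q->lock held and a non-empty freelist.
 */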
static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
{
    NVMeRequest *req;

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;
    return req;
}

/* Return a free request element if any, otherwise return NULL. */
static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);
    if (q->free_req_head == -1) {
        return NULL;
    }
    return nvme_get_free_req_nofail_locked(q);
}

/*
 * Wait for a free request to become available if necessary, then
 * return it.
 */
static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);

    while (q->free_req_head == -1) {
        trace_nvme_free_req_queue_wait(q->s, q->index);
        qemu_co_queue_wait(&q->free_req_queue, &q->lock);
    }

    return nvme_get_free_req_nofail_locked(q);
}

/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}

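/*
 * Bit 0 of the CQE Status Field is the Phase Tag, so shifting right by one
 * (and masking) yields the Status Code. 0 is success; the generic codes
 * 1 (Invalid Command Opcode) and 2 (Invalid Field in Command) map naturally
 * onto -ENOSYS and -EINVAL.
 */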
static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}

/* With q->lock */
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
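    /*
     * q->cq_phase tracks the Phase Tag value of stale entries: the
     * controller inverts the Phase Tag bit on every pass through the queue,
     * so a CQE whose phase bit differs from q->cq_phase is newly posted.
     * The host mirrors that inversion by toggling cq_phase whenever cq.head
     * wraps around.
     */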
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_NUM_REQS) {
            warn_report("NVMe: Unexpected CID in completion queue: %" PRIu32
                        ", should be within: 1..%u inclusively", cid,
                        NVME_NUM_REQS);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}

static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_deferred_fn(void *opaque)
{
    NVMeQueuePair *q = opaque;

    QEMU_LOCK_GUARD(&q->lock);
    nvme_kick(q);
    nvme_process_completion(q);
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    qemu_mutex_unlock(&q->lock);

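    /*
     * When the caller is inside a defer_call_begin()/defer_call_end()
     * section, defer_call() batches nvme_deferred_fn() so that several
     * submissions share a single doorbell write; otherwise it runs
     * immediately.
     */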
    defer_call(nvme_deferred_fn, q);
}

static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q = s->queues[INDEX_ADMIN];
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req_nowait(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}

/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    ERRP_GUARD();
    BDRVNVMeState *s = bs->opaque;
    bool ret = false;
    QEMU_AUTO_VFREE union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id = NULL;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
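    /*
     * CNS 0x1 in CDW10 selects Identify Controller; the same command is
     * reissued further down with CNS 0x0 and a namespace ID to fetch the
     * Identify Namespace data structure.
     */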
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };
    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size());

    id = qemu_try_memalign(qemu_real_host_page_size(), id_size);
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto out;
    }

    memset(id, 0, id_size);
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, id_size);
    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    ret = true;
    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);

    return ret;
}

static void nvme_poll_queue(NVMeQueuePair *q)
{
    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep polling */
    }
    qemu_mutex_unlock(&q->lock);
}

static void nvme_poll_queues(BDRVNVMeState *s)
{
    int i;

    for (i = 0; i < s->queue_count; i++) {
        nvme_poll_queue(s->queues[i]);
    }
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    unsigned n = s->queue_count;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    unsigned queue_size = NVME_QUEUE_SIZE;

    assert(n <= UINT16_MAX);
    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
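    /*
     * For both Create I/O Completion Queue and Create I/O Submission Queue,
     * CDW10 carries the 0's-based queue size in bits 31:16 and the queue ID
     * in bits 15:0. CDW11 sets the physically-contiguous flag (plus
     * interrupt enable for the CQ); the SQ binds to its CQ via CDW11
     * bits 31:16.
     */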
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%u]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%u]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->queue_count++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
    int i;

    for (i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];
        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

        /*
         * q->lock isn't needed because nvme_process_completion() only runs in
         * the event loop thread and cannot race with itself.
         */
        if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) {
            return true;
        }
    }
    return false;
}

static void nvme_poll_ready(EventNotifier *e)
{
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint32_t ver;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    volatile NvmeBar *regs = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                 PROT_READ | PROT_WRITE, errp);
    if (!regs) {
        ret = -EINVAL;
        goto out;
    }
    /* Perform the initialization sequence as described in NVMe spec section
     * 7.6.1 "Initialization". */

    cap = le64_to_cpu(regs->cap);
    trace_nvme_controller_capability_raw(cap);
    trace_nvme_controller_capability("Maximum Queue Entries Supported",
                                     1 + NVME_CAP_MQES(cap));
    trace_nvme_controller_capability("Contiguous Queues Required",
                                     NVME_CAP_CQR(cap));
    trace_nvme_controller_capability("Doorbell Stride",
                                     1 << (2 + NVME_CAP_DSTRD(cap)));
    trace_nvme_controller_capability("Subsystem Reset Supported",
                                     NVME_CAP_NSSRS(cap));
    trace_nvme_controller_capability("Memory Page Size Minimum",
                                     1 << (12 + NVME_CAP_MPSMIN(cap)));
    trace_nvme_controller_capability("Memory Page Size Maximum",
                                     1 << (12 + NVME_CAP_MPSMAX(cap)));
    if (!NVME_CAP_CSS(cap)) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

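    /*
     * CAP.MPSMIN encodes the minimum memory page size as 2^(12 + MPSMIN)
     * bytes; CAP.DSTRD encodes the doorbell stride as 2^(2 + DSTRD) bytes,
     * converted here into uint32_t units for indexing s->doorbells[].
     */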
    s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
    s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
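    /* CAP.TO gives the worst-case ready timeout in 500 ms units; cap it at
     * 30 seconds. */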
    timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);

    ver = le32_to_cpu(regs->vs);
    trace_nvme_controller_spec_version(extract32(ver, 16, 16),
                                       extract32(ver, 8, 8),
                                       extract32(ver, 0, 8));

    /* Reset device to get a clean state. */
    regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
    while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0,
                                           sizeof(NvmeBar) + NVME_DOORBELL_SIZE,
                                           PROT_WRITE, errp);
    s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar));
    if (!s->doorbells) {
        ret = -EINVAL;
        goto out;
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);
    if (!q) {
        ret = -EINVAL;
        goto out;
    }
    s->queues[INDEX_ADMIN] = q;
    s->queue_count = 1;
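    /*
     * AQA holds the 0's-based sizes of the admin submission and completion
     * queues; ASQ and ACQ take their base addresses (IOVAs here, since the
     * controller reaches them through the IOMMU).
     */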
    QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
    regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
                            ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
    regs->asq = cpu_to_le64(q->sq.iova);
    regs->acq = cpu_to_le64(q->cq.iova);

    /* After setting up all control registers we can enable the device now. */
    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
                           (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
                           CC_EN_MASK);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * SCALE_MS;
    while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           nvme_handle_event, nvme_poll_cb,
                           nvme_poll_ready);

    if (!nvme_identify(bs, namespace, errp)) {
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    if (regs) {
        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
    }

    /* Cleaning up is done in nvme_open() upon error. */
    return ret;
}

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}

static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_admin_cmd_sync(bs, &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           NULL, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
                            0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
        } else {
            ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                                  errp);
        }
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t coroutine_fn nvme_co_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;
    Error *local_err = NULL, **errp = NULL;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
        size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
                                   qemu_real_host_page_size());
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              len, true, &iova, errp);
        if (r == -ENOSPC) {
            /*
             * In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
             * ioctl returns -ENOSPC to signal the user exhausted the DMA
             * mappings available for a container since Linux kernel commit
             * 492855939bdb ("vfio/type1: Limit DMA mappings per container",
             * April 2019, see CVE-2019-3882).
             *
             * This block driver already handles this error path by checking
             * for the -ENOMEM error, so we directly replace -ENOSPC by
             * -ENOMEM. Besides, -ENOSPC has a specific meaning for blockdev
             * coroutines: it triggers BLOCKDEV_ON_ERROR_ENOSPC and
             * BLOCK_ERROR_ACTION_STOP which stops the VM, asking the operator
             * to add more storage to the blockdev. Not something we can do
             * easily with an IOMMU :)
             */
            r = -ENOMEM;
        }
        if (r == -ENOMEM && retry) {
            /*
             * We exhausted the DMA mappings available for our container:
             * recycle the volatile IOVA mappings.
             */
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            errp = &local_err;

            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
1127bdd6a90aSFam Zheng switch (entries) {
1128bdd6a90aSFam Zheng case 0:
1129bdd6a90aSFam Zheng abort();
1130bdd6a90aSFam Zheng case 1:
1131c26f2173SKlaus Jensen cmd->dptr.prp1 = pagelist[0];
1132c26f2173SKlaus Jensen cmd->dptr.prp2 = 0;
1133bdd6a90aSFam Zheng break;
1134bdd6a90aSFam Zheng case 2:
1135c26f2173SKlaus Jensen cmd->dptr.prp1 = pagelist[0];
1136c26f2173SKlaus Jensen cmd->dptr.prp2 = pagelist[1];
1137bdd6a90aSFam Zheng break;
1138bdd6a90aSFam Zheng default:
1139c26f2173SKlaus Jensen cmd->dptr.prp1 = pagelist[0];
1140c26f2173SKlaus Jensen cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
1141bdd6a90aSFam Zheng break;
1142bdd6a90aSFam Zheng }
1143bdd6a90aSFam Zheng trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
1144bdd6a90aSFam Zheng for (i = 0; i < entries; ++i) {
1145bdd6a90aSFam Zheng trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
1146bdd6a90aSFam Zheng }
1147bdd6a90aSFam Zheng return 0;
1148bdd6a90aSFam Zheng fail:
1149bdd6a90aSFam Zheng /* No need to unmap [0 - i) iovs even if we've failed, since we don't
1150bdd6a90aSFam Zheng * increment s->dma_map_count. This is okay for fixed mapping memory areas
1151bdd6a90aSFam Zheng * because they are already mapped before calling this function; for
1152bdd6a90aSFam Zheng * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
1153bdd6a90aSFam Zheng * calling qemu_vfio_dma_reset_temporary when necessary. */
11549bd2788fSPhilippe Mathieu-Daudé if (local_err) {
11559bd2788fSPhilippe Mathieu-Daudé error_reportf_err(local_err, "Cannot map buffer for DMA: ");
11569bd2788fSPhilippe Mathieu-Daudé }
1157bdd6a90aSFam Zheng return r;
1158bdd6a90aSFam Zheng }
1159bdd6a90aSFam Zheng
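/* State shared between a request coroutine and its completion callback */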
typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

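/* Bottom half that re-enters the request coroutine in its AioContext */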
static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

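/*
 * Completion callback: store the command's return value and, if the
 * request coroutine has already yielded, schedule a bottom half to
 * re-enter it.
 */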
static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}

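/*
 * Submit a single READ/WRITE command for a page-aligned request and yield
 * until nvme_rw_cb() reports completion. The starting LBA goes into
 * cdw10/cdw11; the 0-based block count and the FUA bit go into cdw12.
 */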
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

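/*
 * A vector can be DMA-mapped directly only if every iovec base and length
 * is aligned to the host page size.
 */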
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
                                 qemu_real_host_page_size()) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

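/*
 * Common read/write path: aligned vectors are mapped in place, while
 * unaligned ones are bounced through a page-aligned buffer (copied in
 * before a write, copied out after a successful read).
 */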
static coroutine_fn int nvme_co_prw(BlockDriverState *bs,
                                    uint64_t offset, uint64_t bytes,
                                    QEMUIOVector *qiov, bool is_write,
                                    int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    QEMU_AUTO_VFREE uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size());
    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        s->stats.aligned_accesses++;
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    s->stats.unaligned_accesses++;
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_memalign(qemu_real_host_page_size(), len);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    return r;
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       int64_t offset, int64_t bytes,
                                       QEMUIOVector *qiov,
                                       BdrvRequestFlags flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        int64_t offset, int64_t bytes,
                                        QEMUIOVector *qiov,
                                        BdrvRequestFlags flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

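/* Issue an NVMe FLUSH for the namespace and wait for its completion */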
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}

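/*
 * Issue a WRITE ZEROES command. cdw12 carries the 0-based 16-bit block
 * count plus the deallocate (BDRV_REQ_MAY_UNMAP) and FUA flag bits.
 */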
static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset,
                                              int64_t bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    uint32_t cdw12;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    if (bytes == 0) {
        return 0;
    }

    cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
    /*
     * We should not lose information. pwrite_zeroes_alignment and
     * max_pwrite_zeroes guarantee it.
     */
    assert(((cdw12 + 1) << s->blkshift) == bytes);

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}

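/*
 * Discard a range via a DSM command with the deallocate attribute set;
 * the single range descriptor is passed in a DMA-mapped scratch page.
 */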
static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset,
                                         int64_t bytes)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL;
    QEMUIOVector local_qiov;
    int ret;

    NvmeCmd cmd = {
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0), /* number of ranges - 0 based */
        .cdw11 = cpu_to_le32(1 << 2), /* deallocate bit */
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (!s->supports_discard) {
        return -ENOTSUP;
    }

    assert(s->queue_count > 1);

    /*
     * Filling the @buf requires @offset and @bytes to satisfy restrictions
     * defined in nvme_refresh_limits().
     */
    assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift));
    assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift));
    assert((bytes >> s->blkshift) <= UINT32_MAX);

    buf = qemu_try_memalign(s->page_size, s->page_size);
    if (!buf) {
        return -ENOMEM;
    }
    memset(buf, 0, s->page_size);
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);
    buf->cattr = 0;

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, 4096);

    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        nvme_put_free_req_and_wake(ioq, req);
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        goto out;
    }

    ret = data.ret;
    trace_nvme_dsm_done(s, offset, bytes, ret);
out:
    qemu_iovec_destroy(&local_qiov);
    return ret;
}

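/*
 * An NVMe namespace has a fixed size: growing is impossible, and a
 * shrinking "truncate" is accepted only as a no-op when an inexact
 * size is allowed.
 */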
static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
                                         bool exact, PreallocMode prealloc,
                                         BdrvRequestFlags flags, Error **errp)
{
    int64_t cur_length;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    cur_length = nvme_co_getlength(bs);
    if (offset != cur_length && exact) {
        error_setg(errp, "Cannot resize NVMe devices");
        return -ENOTSUP;
    } else if (offset > cur_length) {
        error_setg(errp, "Cannot grow NVMe devices");
        return -EINVAL;
    }

    return 0;
}

static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}

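/*
 * Export alignment and transfer limits to the block layer. The
 * write-zeroes and discard limits match the encoding constraints asserted
 * in nvme_co_pwrite_zeroes() and nvme_co_pdiscard().
 */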
static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;

    /*
     * Look at nvme_co_pwrite_zeroes: after shift and decrement we should get
     * at most 0xFFFF
     */
    bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16);
    bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment,
                                         1UL << s->blkshift);

    bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift;
    bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment,
                                    1UL << s->blkshift);
}

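/*
 * Stop using the current AioContext: delete each queue's completion BH
 * and unregister the shared MSI-X interrupt notifier.
 */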
static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        qemu_bh_delete(q->completion_bh);
        q->completion_bh = NULL;
    }

    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           NULL, NULL, NULL);
}

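/*
 * Start using @new_context: register the interrupt notifier (with polling
 * callbacks) there and recreate each queue's completion BH.
 */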
static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           nvme_handle_event, nvme_poll_cb,
                           nvme_poll_ready);

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        q->completion_bh =
            aio_bh_new(new_context, nvme_process_completion_bh, q);
    }
}

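/*
 * Create a permanent DMA mapping for a buffer so that later I/O to it
 * skips the per-request (temporary) mapping step.
 */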
static bool nvme_register_buf(BlockDriverState *bs, void *host, size_t size,
                              Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    /*
     * FIXME: we may run out of IOVA addresses after repeated
     * bdrv_register_buf/bdrv_unregister_buf, because qemu_vfio_dma_unmap
     * doesn't reclaim addresses for fixed mappings.
     */
    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, errp);
    return ret == 0;
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

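/* Expose the driver's per-device access counters via query-blockstats */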
static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
{
    BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
    BDRVNVMeState *s = bs->opaque;

    stats->driver = BLOCKDEV_DRIVER_NVME;
    stats->u.nvme = (BlockStatsSpecificNvme) {
        .completion_errors = s->stats.completion_errors,
        .aligned_accesses = s->stats.aligned_accesses,
        .unaligned_accesses = s->stats.unaligned_accesses,
    };

    return stats;
}

static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};

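/* Method table for the "nvme://" protocol driver */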
static BlockDriver bdrv_nvme = {
    .format_name = "nvme",
    .protocol_name = "nvme",
    .instance_size = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts = bdrv_co_create_opts_simple,
    .create_opts = &bdrv_create_opts_simple,

    .bdrv_parse_filename = nvme_parse_filename,
    .bdrv_open = nvme_open,
    .bdrv_close = nvme_close,
    .bdrv_co_getlength = nvme_co_getlength,
    .bdrv_probe_blocksizes = nvme_probe_blocksizes,
    .bdrv_co_truncate = nvme_co_truncate,

    .bdrv_co_preadv = nvme_co_preadv,
    .bdrv_co_pwritev = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk = nvme_co_flush,
    .bdrv_reopen_prepare = nvme_reopen_prepare,

    .bdrv_refresh_filename = nvme_refresh_filename,
    .bdrv_refresh_limits = nvme_refresh_limits,
    .strong_runtime_opts = nvme_strong_runtime_opts,
    .bdrv_get_specific_stats = nvme_get_specific_stats,

    .bdrv_detach_aio_context = nvme_detach_aio_context,
    .bdrv_attach_aio_context = nvme_attach_aio_context,

    .bdrv_register_buf = nvme_register_buf,
    .bdrv_unregister_buf = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);