xref: /openbmc/qemu/hw/nvme/ctrl.c (revision 93e0932b)
1 /*
2  * QEMU NVM Express Controller
3  *
4  * Copyright (c) 2012, Intel Corporation
5  *
6  * Written by Keith Busch <keith.busch@intel.com>
7  *
8  * This code is licensed under the GNU GPL v2 or later.
9  */
10 
11 /**
12  * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
13  *
14  *  https://nvmexpress.org/developers/nvme-specification/
15  *
16  *
17  * Notes on coding style
18  * ---------------------
19  * While QEMU coding style prefers lowercase hexadecimals in constants, the
20  * NVMe subsystem uses the format from the NVMe specifications in the comments
21  * (i.e. 'h' suffix instead of '0x' prefix).
22  *
23  * Usage
24  * -----
25  * See docs/system/nvme.rst for extensive documentation.
26  *
27  * Add options:
28  *      -drive file=<file>,if=none,id=<drive_id>
29  *      -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
30  *      -device nvme,serial=<serial>,id=<bus_name>, \
31  *              cmb_size_mb=<cmb_size_mb[optional]>, \
32  *              [pmrdev=<mem_backend_file_id>,] \
33  *              max_ioqpairs=<N[optional]>, \
34  *              aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
35  *              mdts=<N[optional]>,vsl=<N[optional]>, \
36  *              zoned.zasl=<N[optional]>, \
37  *              zoned.auto_transition=<on|off[optional]>, \
38  *              sriov_max_vfs=<N[optional]> \
39  *              sriov_vq_flexible=<N[optional]> \
40  *              sriov_vi_flexible=<N[optional]> \
41  *              sriov_max_vi_per_vf=<N[optional]> \
42  *              sriov_max_vq_per_vf=<N[optional]> \
43  *              subsys=<subsys_id>
44  *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
45  *              zoned=<true|false[optional]>, \
46  *              subsys=<subsys_id>,detached=<true|false[optional]>
47  *
48  * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed to be at
49  * offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By default, the
50  * device will use the "v1.4 CMB scheme" - use the `legacy-cmb` parameter to
51  * always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
52  *
53  * Enabling PMR emulation can be achieved by pointing pmrdev to a memory-backend-file.
54  * For example:
55  * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
56  *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
57  *
58  * The PMR will use BAR 4/5 exclusively.
59  *
60  * To place controller(s) and namespace(s) in a subsystem, provide an
61  * nvme-subsys device as above.
62  *
63  * nvme subsystem device parameters
64  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
65  * - `nqn`
66  *   This parameter provides the `<nqn_id>` part of the string
67  *   `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
68  *   of subsystem controllers. Note that `<nqn_id>` should be unique per
69  *   subsystem, but this is not enforced by QEMU. If not specified, it will
70  *   default to the value of the `id` parameter (`<subsys_id>`).
71  *
72  * nvme device parameters
73  * ~~~~~~~~~~~~~~~~~~~~~~
74  * - `subsys`
75  *   Specifying this parameter attaches the controller to the subsystem and
76  *   the SUBNQN field in the controller will report the NQN of the subsystem
77  *   device. This also enables multi controller capability represented in
78  *   Identify Controller data structure in CMIC (Controller Multi-path I/O and
79  *   Namespace Sharing Capabilities).
80  *
81  * - `aerl`
82  *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
83  *   of concurrently outstanding Asynchronous Event Request commands supported
84  *   by the controller. This is a 0's based value.
85  *
86  * - `aer_max_queued`
87  *   This is the maximum number of events that the device will enqueue for
88  *   completion when there are no outstanding AERs. When the maximum number of
89  *   enqueued events are reached, subsequent events will be dropped.
90  *
91  * - `mdts`
92  *   Indicates the maximum data transfer size for a command that transfers data
93  *   between host-accessible memory and the controller. The value is specified
94  *   as a power of two (2^n) and is in units of the minimum memory page size
95  *   (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
96  *
97  * - `vsl`
98  *   Indicates the maximum data size limit for the Verify command. Like `mdts`,
99  *   this value is specified as a power of two (2^n) and is in units of the
100  *   minimum memory page size (CAP.MPSMIN). The default value is 7 (i.e. 512
101  *   KiB).
102  *
103  * - `zoned.zasl`
104  *   Indicates the maximum data transfer size for the Zone Append command. Like
105  *   `mdts`, the value is specified as a power of two (2^n) and is in units of
106  *   the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
107  *   defaulting to the value of `mdts`).
108  *
109  * - `zoned.auto_transition`
110  *   Indicates if zones in the implicitly opened state can be automatically
111  *   transitioned to the closed state for resource management purposes.
112  *   Defaults to 'on'.
113  *
114  * - `sriov_max_vfs`
115  *   Indicates the maximum number of PCIe virtual functions supported
116  *   by the controller. The default value is 0. Specifying a non-zero value
117  *   enables reporting of both SR-IOV and ARI capabilities by the NVMe device.
118  *   Virtual function controllers will not report SR-IOV capability.
119  *
120  *   NOTE: Single Root I/O Virtualization support is experimental.
121  *   All the related parameters may be subject to change.
122  *
123  * - `sriov_vq_flexible`
124  *   Indicates the total number of flexible queue resources assignable to all
125  *   the secondary controllers. Implicitly sets the number of primary
126  *   controller's private resources to `(max_ioqpairs - sriov_vq_flexible)`.
127  *
128  * - `sriov_vi_flexible`
129  *   Indicates the total number of flexible interrupt resources assignable to
130  *   all the secondary controllers. Implicitly sets the number of primary
131  *   controller's private resources to `(msix_qsize - sriov_vi_flexible)`.
132  *
133  * - `sriov_max_vi_per_vf`
134  *   Indicates the maximum number of virtual interrupt resources assignable
135  *   to a secondary controller. The default 0 resolves to
136  *   `(sriov_vi_flexible / sriov_max_vfs)`.
137  *
138  * - `sriov_max_vq_per_vf`
139  *   Indicates the maximum number of virtual queue resources assignable to
140  *   a secondary controller. The default 0 resolves to
141  *   `(sriov_vq_flexible / sriov_max_vfs)`.
142  *
143  * nvme namespace device parameters
144  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
145  * - `shared`
146  *   When the parent nvme device (as defined explicitly by the 'bus' parameter
147  *   or implicitly by the most recently defined NvmeBus) is linked to an
148  *   nvme-subsys device, the namespace will be attached to all controllers in
149  *   the subsystem. If set to 'off' (the default), the namespace will remain a
150  *   private namespace and may only be attached to a single controller at a
151  *   time.
152  *
153  * - `detached`
154  *   This parameter is only valid together with the `subsys` parameter. If left
155  *   at the default value (`false/off`), the namespace will be attached to all
156  *   controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
157  *   namespace will be available in the subsystem but not attached to any
158  *   controllers.
159  *
160  * Setting `zoned` to true selects the Zoned Command Set for the namespace.
161  * In this case, the following namespace properties are available to configure
162  * zoned operation:
163  *     zoned.zone_size=<zone size in bytes, default: 128MiB>
164  *         The number may be followed by K, M, G as in kilo-, mega- or giga-.
165  *
166  *     zoned.zone_capacity=<zone capacity in bytes, default: zone size>
167  *         The value 0 (default) forces zone capacity to be the same as zone
168  *         size. The value of this property may not exceed zone size.
169  *
170  *     zoned.descr_ext_size=<zone descriptor extension size, default 0>
171  *         This value needs to be specified in 64B units. If it is zero,
172  *         namespace(s) will not support zone descriptor extensions.
173  *
174  *     zoned.max_active=<Maximum Active Resources (zones), default: 0>
175  *         The default value means there is no limit to the number of
176  *         concurrently active zones.
177  *
178  *     zoned.max_open=<Maximum Open Resources (zones), default: 0>
179  *         The default value means there is no limit to the number of
180  *         concurrently open zones.
181  *
182  *     zoned.cross_read=<enable RAZB, default: false>
183  *         Setting this property to true enables Read Across Zone Boundaries.
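 *
 * As an illustrative (hypothetical) example, a zoned namespace attached to a
 * controller in a subsystem could be configured as follows (IDs and sizes are
 * placeholders only):
 *
 *     -drive file=<file>,if=none,id=nvm0 \
 *     -device nvme-subsys,id=subsys0,nqn=subsys0 \
 *     -device nvme,serial=deadbeef,subsys=subsys0 \
 *     -device nvme-ns,drive=nvm0,zoned=true,zoned.zone_size=64M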
184  */
185 
186 #include "qemu/osdep.h"
187 #include "qemu/cutils.h"
188 #include "qemu/error-report.h"
189 #include "qemu/log.h"
190 #include "qemu/units.h"
191 #include "qemu/range.h"
192 #include "qapi/error.h"
193 #include "qapi/visitor.h"
194 #include "sysemu/sysemu.h"
195 #include "sysemu/block-backend.h"
196 #include "sysemu/hostmem.h"
197 #include "hw/pci/msix.h"
198 #include "hw/pci/pcie_sriov.h"
199 #include "migration/vmstate.h"
200 
201 #include "nvme.h"
202 #include "dif.h"
203 #include "trace.h"
204 
205 #define NVME_MAX_IOQPAIRS 0xffff
206 #define NVME_DB_SIZE  4
207 #define NVME_SPEC_VER 0x00010400
208 #define NVME_CMB_BIR 2
209 #define NVME_PMR_BIR 4
210 #define NVME_TEMPERATURE 0x143
211 #define NVME_TEMPERATURE_WARNING 0x157
212 #define NVME_TEMPERATURE_CRITICAL 0x175
213 #define NVME_NUM_FW_SLOTS 1
214 #define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB)
215 #define NVME_MAX_VFS 127
216 #define NVME_VF_RES_GRANULARITY 1
217 #define NVME_VF_OFFSET 0x1
218 #define NVME_VF_STRIDE 1
219 
220 #define NVME_GUEST_ERR(trace, fmt, ...) \
221     do { \
222         (trace_##trace)(__VA_ARGS__); \
223         qemu_log_mask(LOG_GUEST_ERROR, #trace \
224             " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
225     } while (0)
226 
227 static const bool nvme_feature_support[NVME_FID_MAX] = {
228     [NVME_ARBITRATION]              = true,
229     [NVME_POWER_MANAGEMENT]         = true,
230     [NVME_TEMPERATURE_THRESHOLD]    = true,
231     [NVME_ERROR_RECOVERY]           = true,
232     [NVME_VOLATILE_WRITE_CACHE]     = true,
233     [NVME_NUMBER_OF_QUEUES]         = true,
234     [NVME_INTERRUPT_COALESCING]     = true,
235     [NVME_INTERRUPT_VECTOR_CONF]    = true,
236     [NVME_WRITE_ATOMICITY]          = true,
237     [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
238     [NVME_TIMESTAMP]                = true,
239     [NVME_HOST_BEHAVIOR_SUPPORT]    = true,
240     [NVME_COMMAND_SET_PROFILE]      = true,
241 };
242 
243 static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
244     [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
245     [NVME_ERROR_RECOVERY]           = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
246     [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
247     [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
248     [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
249     [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
250     [NVME_HOST_BEHAVIOR_SUPPORT]    = NVME_FEAT_CAP_CHANGE,
251     [NVME_COMMAND_SET_PROFILE]      = NVME_FEAT_CAP_CHANGE,
252 };
253 
254 static const uint32_t nvme_cse_acs[256] = {
255     [NVME_ADM_CMD_DELETE_SQ]        = NVME_CMD_EFF_CSUPP,
256     [NVME_ADM_CMD_CREATE_SQ]        = NVME_CMD_EFF_CSUPP,
257     [NVME_ADM_CMD_GET_LOG_PAGE]     = NVME_CMD_EFF_CSUPP,
258     [NVME_ADM_CMD_DELETE_CQ]        = NVME_CMD_EFF_CSUPP,
259     [NVME_ADM_CMD_CREATE_CQ]        = NVME_CMD_EFF_CSUPP,
260     [NVME_ADM_CMD_IDENTIFY]         = NVME_CMD_EFF_CSUPP,
261     [NVME_ADM_CMD_ABORT]            = NVME_CMD_EFF_CSUPP,
262     [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
263     [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
264     [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
265     [NVME_ADM_CMD_NS_ATTACHMENT]    = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
266     [NVME_ADM_CMD_VIRT_MNGMT]       = NVME_CMD_EFF_CSUPP,
267     [NVME_ADM_CMD_DBBUF_CONFIG]     = NVME_CMD_EFF_CSUPP,
268     [NVME_ADM_CMD_FORMAT_NVM]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
269 };
270 
271 static const uint32_t nvme_cse_iocs_none[256];
272 
273 static const uint32_t nvme_cse_iocs_nvm[256] = {
274     [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
275     [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
276     [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
277     [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
278     [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
279     [NVME_CMD_VERIFY]               = NVME_CMD_EFF_CSUPP,
280     [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
281     [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
282 };
283 
284 static const uint32_t nvme_cse_iocs_zoned[256] = {
285     [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
286     [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
287     [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
288     [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
289     [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
290     [NVME_CMD_VERIFY]               = NVME_CMD_EFF_CSUPP,
291     [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
292     [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
293     [NVME_CMD_ZONE_APPEND]          = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
294     [NVME_CMD_ZONE_MGMT_SEND]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
295     [NVME_CMD_ZONE_MGMT_RECV]       = NVME_CMD_EFF_CSUPP,
296 };
297 
298 static void nvme_process_sq(void *opaque);
299 static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst);
300 
301 static uint16_t nvme_sqid(NvmeRequest *req)
302 {
303     return le16_to_cpu(req->sq->sqid);
304 }
305 
306 static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone,
307                                    NvmeZoneState state)
308 {
309     if (QTAILQ_IN_USE(zone, entry)) {
310         switch (nvme_get_zone_state(zone)) {
311         case NVME_ZONE_STATE_EXPLICITLY_OPEN:
312             QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry);
313             break;
314         case NVME_ZONE_STATE_IMPLICITLY_OPEN:
315             QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
316             break;
317         case NVME_ZONE_STATE_CLOSED:
318             QTAILQ_REMOVE(&ns->closed_zones, zone, entry);
319             break;
320         case NVME_ZONE_STATE_FULL:
321             QTAILQ_REMOVE(&ns->full_zones, zone, entry);
322         default:
323             ;
324         }
325     }
326 
327     nvme_set_zone_state(zone, state);
328 
329     switch (state) {
330     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
331         QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry);
332         break;
333     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
334         QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry);
335         break;
336     case NVME_ZONE_STATE_CLOSED:
337         QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry);
338         break;
339     case NVME_ZONE_STATE_FULL:
340         QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry);
341     case NVME_ZONE_STATE_READ_ONLY:
342         break;
343     default:
344         zone->d.za = 0;
345     }
346 }
347 
348 static uint16_t nvme_zns_check_resources(NvmeNamespace *ns, uint32_t act,
349                                          uint32_t opn, uint32_t zrwa)
350 {
351     if (ns->params.max_active_zones != 0 &&
352         ns->nr_active_zones + act > ns->params.max_active_zones) {
353         trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones);
354         return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR;
355     }
356 
357     if (ns->params.max_open_zones != 0 &&
358         ns->nr_open_zones + opn > ns->params.max_open_zones) {
359         trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones);
360         return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR;
361     }
362 
363     if (zrwa > ns->zns.numzrwa) {
364         return NVME_NOZRWA | NVME_DNR;
365     }
366 
367     return NVME_SUCCESS;
368 }
369 
370 /*
371  * Check if we can open a zone without exceeding open/active limits.
372  * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5).
373  */
374 static uint16_t nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn)
375 {
376     return nvme_zns_check_resources(ns, act, opn, 0);
377 }
378 
379 static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
380 {
381     hwaddr hi, lo;
382 
383     if (!n->cmb.cmse) {
384         return false;
385     }
386 
387     lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
388     hi = lo + int128_get64(n->cmb.mem.size);
389 
390     return addr >= lo && addr < hi;
391 }
392 
393 static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
394 {
395     hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
396     return &n->cmb.buf[addr - base];
397 }
398 
399 static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr)
400 {
401     hwaddr hi;
402 
403     if (!n->pmr.cmse) {
404         return false;
405     }
406 
407     hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size);
408 
409     return addr >= n->pmr.cba && addr < hi;
410 }
411 
412 static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr)
413 {
414     return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba);
415 }
416 
417 static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr)
418 {
419     hwaddr hi, lo;
420 
421     /*
422      * The purpose of this check is to guard against invalid "local" access to
423      * the iomem (i.e. controller registers). Thus, we check against the range
424      * covered by the 'bar0' MemoryRegion since that is currently composed of
425      * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however,
426      * that if the device model is ever changed to allow the CMB to be located
427      * in BAR0 as well, then this must be changed.
428      */
429     lo = n->bar0.addr;
430     hi = lo + int128_get64(n->bar0.size);
431 
432     return addr >= lo && addr < hi;
433 }
434 
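/*
 * Read `size` bytes of guest-visible memory at `addr` into `buf`. Ranges that
 * fall entirely within the CMB or the PMR are copied directly from the local
 * backing memory; everything else goes through a regular PCI DMA read.
 * Returns non-zero on failure (e.g. if the range wraps around the address
 * space). nvme_addr_write() below is the symmetric write path.
 */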
435 static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
436 {
437     hwaddr hi = addr + size - 1;
438     if (hi < addr) {
439         return 1;
440     }
441 
442     if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
443         memcpy(buf, nvme_addr_to_cmb(n, addr), size);
444         return 0;
445     }
446 
447     if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
448         memcpy(buf, nvme_addr_to_pmr(n, addr), size);
449         return 0;
450     }
451 
452     return pci_dma_read(PCI_DEVICE(n), addr, buf, size);
453 }
454 
455 static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size)
456 {
457     hwaddr hi = addr + size - 1;
458     if (hi < addr) {
459         return 1;
460     }
461 
462     if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
463         memcpy(nvme_addr_to_cmb(n, addr), buf, size);
464         return 0;
465     }
466 
467     if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
468         memcpy(nvme_addr_to_pmr(n, addr), buf, size);
469         return 0;
470     }
471 
472     return pci_dma_write(PCI_DEVICE(n), addr, buf, size);
473 }
474 
475 static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
476 {
477     return nsid &&
478         (nsid == NVME_NSID_BROADCAST || nsid <= NVME_MAX_NAMESPACES);
479 }
480 
481 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
482 {
483     return sqid < n->conf_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
484 }
485 
486 static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
487 {
488     return cqid < n->conf_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
489 }
490 
491 static void nvme_inc_cq_tail(NvmeCQueue *cq)
492 {
493     cq->tail++;
494     if (cq->tail >= cq->size) {
495         cq->tail = 0;
496         cq->phase = !cq->phase;
497     }
498 }
499 
500 static void nvme_inc_sq_head(NvmeSQueue *sq)
501 {
502     sq->head = (sq->head + 1) % sq->size;
503 }
504 
505 static uint8_t nvme_cq_full(NvmeCQueue *cq)
506 {
507     return (cq->tail + 1) % cq->size == cq->head;
508 }
509 
510 static uint8_t nvme_sq_empty(NvmeSQueue *sq)
511 {
512     return sq->head == sq->tail;
513 }
514 
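/*
 * For legacy pin-based interrupts, n->irq_status tracks which CQ interrupt
 * vectors currently have unacknowledged completions. The pin is asserted when
 * any such vector is not masked via INTMS and deasserted otherwise. MSI-X
 * interrupts are delivered per vector and bypass this aggregation.
 */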
515 static void nvme_irq_check(NvmeCtrl *n)
516 {
517     PCIDevice *pci = PCI_DEVICE(n);
518     uint32_t intms = ldl_le_p(&n->bar.intms);
519 
520     if (msix_enabled(pci)) {
521         return;
522     }
523     if (~intms & n->irq_status) {
524         pci_irq_assert(pci);
525     } else {
526         pci_irq_deassert(pci);
527     }
528 }
529 
530 static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
531 {
532     PCIDevice *pci = PCI_DEVICE(n);
533 
534     if (cq->irq_enabled) {
535         if (msix_enabled(pci)) {
536             trace_pci_nvme_irq_msix(cq->vector);
537             msix_notify(pci, cq->vector);
538         } else {
539             trace_pci_nvme_irq_pin();
540             assert(cq->vector < 32);
541             n->irq_status |= 1 << cq->vector;
542             nvme_irq_check(n);
543         }
544     } else {
545         trace_pci_nvme_irq_masked();
546     }
547 }
548 
549 static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
550 {
551     if (cq->irq_enabled) {
552         if (msix_enabled(PCI_DEVICE(n))) {
553             return;
554         } else {
555             assert(cq->vector < 32);
556             if (!n->cq_pending) {
557                 n->irq_status &= ~(1 << cq->vector);
558             }
559             nvme_irq_check(n);
560         }
561     }
562 }
563 
564 static void nvme_req_clear(NvmeRequest *req)
565 {
566     req->ns = NULL;
567     req->opaque = NULL;
568     req->aiocb = NULL;
569     memset(&req->cqe, 0x0, sizeof(req->cqe));
570     req->status = NVME_SUCCESS;
571 }
572 
573 static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
574 {
575     if (dma) {
576         pci_dma_sglist_init(&sg->qsg, PCI_DEVICE(n), 0);
577         sg->flags = NVME_SG_DMA;
578     } else {
579         qemu_iovec_init(&sg->iov, 0);
580     }
581 
582     sg->flags |= NVME_SG_ALLOC;
583 }
584 
585 static inline void nvme_sg_unmap(NvmeSg *sg)
586 {
587     if (!(sg->flags & NVME_SG_ALLOC)) {
588         return;
589     }
590 
591     if (sg->flags & NVME_SG_DMA) {
592         qemu_sglist_destroy(&sg->qsg);
593     } else {
594         qemu_iovec_destroy(&sg->iov);
595     }
596 
597     memset(sg, 0x0, sizeof(*sg));
598 }
599 
600 /*
601  * When metadata is transferred as extended LBAs, the DPTR mapped into `sg`
602  * holds both data and metadata. This function splits the data and metadata
603  * into two separate QSG/IOVs.
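 *
 * For example, with a 512 byte LBA data size and 8 bytes of metadata per LBA,
 * the extended LBA buffer alternates 512 byte data and 8 byte metadata chunks;
 * the data chunks are gathered into `data` and the metadata chunks into
 * `mdata`. Either destination may be NULL if that part is not needed.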
604  */
605 static void nvme_sg_split(NvmeSg *sg, NvmeNamespace *ns, NvmeSg *data,
606                           NvmeSg *mdata)
607 {
608     NvmeSg *dst = data;
609     uint32_t trans_len, count = ns->lbasz;
610     uint64_t offset = 0;
611     bool dma = sg->flags & NVME_SG_DMA;
612     size_t sge_len;
613     size_t sg_len = dma ? sg->qsg.size : sg->iov.size;
614     int sg_idx = 0;
615 
616     assert(sg->flags & NVME_SG_ALLOC);
617 
618     while (sg_len) {
619         sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len;
620 
621         trans_len = MIN(sg_len, count);
622         trans_len = MIN(trans_len, sge_len - offset);
623 
624         if (dst) {
625             if (dma) {
626                 qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset,
627                                 trans_len);
628             } else {
629                 qemu_iovec_add(&dst->iov,
630                                sg->iov.iov[sg_idx].iov_base + offset,
631                                trans_len);
632             }
633         }
634 
635         sg_len -= trans_len;
636         count -= trans_len;
637         offset += trans_len;
638 
639         if (count == 0) {
640             dst = (dst == data) ? mdata : data;
641             count = (dst == data) ? ns->lbasz : ns->lbaf.ms;
642         }
643 
644         if (sge_len == offset) {
645             offset = 0;
646             sg_idx++;
647         }
648     }
649 }
650 
651 static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
652                                   size_t len)
653 {
654     if (!len) {
655         return NVME_SUCCESS;
656     }
657 
658     trace_pci_nvme_map_addr_cmb(addr, len);
659 
660     if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
661         return NVME_DATA_TRAS_ERROR;
662     }
663 
664     qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
665 
666     return NVME_SUCCESS;
667 }
668 
669 static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
670                                   size_t len)
671 {
672     if (!len) {
673         return NVME_SUCCESS;
674     }
675 
676     if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) {
677         return NVME_DATA_TRAS_ERROR;
678     }
679 
680     qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len);
681 
682     return NVME_SUCCESS;
683 }
684 
685 static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len)
686 {
687     bool cmb = false, pmr = false;
688 
689     if (!len) {
690         return NVME_SUCCESS;
691     }
692 
693     trace_pci_nvme_map_addr(addr, len);
694 
695     if (nvme_addr_is_iomem(n, addr)) {
696         return NVME_DATA_TRAS_ERROR;
697     }
698 
699     if (nvme_addr_is_cmb(n, addr)) {
700         cmb = true;
701     } else if (nvme_addr_is_pmr(n, addr)) {
702         pmr = true;
703     }
704 
705     if (cmb || pmr) {
706         if (sg->flags & NVME_SG_DMA) {
707             return NVME_INVALID_USE_OF_CMB | NVME_DNR;
708         }
709 
710         if (sg->iov.niov + 1 > IOV_MAX) {
711             goto max_mappings_exceeded;
712         }
713 
714         if (cmb) {
715             return nvme_map_addr_cmb(n, &sg->iov, addr, len);
716         } else {
717             return nvme_map_addr_pmr(n, &sg->iov, addr, len);
718         }
719     }
720 
721     if (!(sg->flags & NVME_SG_DMA)) {
722         return NVME_INVALID_USE_OF_CMB | NVME_DNR;
723     }
724 
725     if (sg->qsg.nsg + 1 > IOV_MAX) {
726         goto max_mappings_exceeded;
727     }
728 
729     qemu_sglist_add(&sg->qsg, addr, len);
730 
731     return NVME_SUCCESS;
732 
733 max_mappings_exceeded:
734     NVME_GUEST_ERR(pci_nvme_ub_too_many_mappings,
735                    "number of mappings exceeds 1024");
736     return NVME_INTERNAL_DEV_ERROR | NVME_DNR;
737 }
738 
739 static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr)
740 {
741     return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr));
742 }
743 
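/*
 * Map a PRP1/PRP2 pair into `sg`. PRP1 may carry an offset into the first
 * memory page. If the remainder of the transfer fits in one more page, PRP2
 * points directly at it; otherwise PRP2 points at a PRP list, which is walked
 * (following chained lists when necessary) to map the rest of the transfer.
 */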
744 static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
745                              uint64_t prp2, uint32_t len)
746 {
747     hwaddr trans_len = n->page_size - (prp1 % n->page_size);
748     trans_len = MIN(len, trans_len);
749     int num_prps = (len >> n->page_bits) + 1;
750     uint16_t status;
751     int ret;
752 
753     trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);
754 
755     nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1));
756 
757     status = nvme_map_addr(n, sg, prp1, trans_len);
758     if (status) {
759         goto unmap;
760     }
761 
762     len -= trans_len;
763     if (len) {
764         if (len > n->page_size) {
765             uint64_t prp_list[n->max_prp_ents];
766             uint32_t nents, prp_trans;
767             int i = 0;
768 
769             /*
770      * The first PRP list entry, pointed to by PRP2, may contain an offset.
771      * Hence, we need to calculate the number of entries based on
772      * that offset.
773              */
774             nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3;
775             prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
776             ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
777             if (ret) {
778                 trace_pci_nvme_err_addr_read(prp2);
779                 status = NVME_DATA_TRAS_ERROR;
780                 goto unmap;
781             }
782             while (len != 0) {
783                 uint64_t prp_ent = le64_to_cpu(prp_list[i]);
784 
785                 if (i == nents - 1 && len > n->page_size) {
786                     if (unlikely(prp_ent & (n->page_size - 1))) {
787                         trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
788                         status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
789                         goto unmap;
790                     }
791 
792                     i = 0;
793                     nents = (len + n->page_size - 1) >> n->page_bits;
794                     nents = MIN(nents, n->max_prp_ents);
795                     prp_trans = nents * sizeof(uint64_t);
796                     ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
797                                          prp_trans);
798                     if (ret) {
799                         trace_pci_nvme_err_addr_read(prp_ent);
800                         status = NVME_DATA_TRAS_ERROR;
801                         goto unmap;
802                     }
803                     prp_ent = le64_to_cpu(prp_list[i]);
804                 }
805 
806                 if (unlikely(prp_ent & (n->page_size - 1))) {
807                     trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
808                     status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
809                     goto unmap;
810                 }
811 
812                 trans_len = MIN(len, n->page_size);
813                 status = nvme_map_addr(n, sg, prp_ent, trans_len);
814                 if (status) {
815                     goto unmap;
816                 }
817 
818                 len -= trans_len;
819                 i++;
820             }
821         } else {
822             if (unlikely(prp2 & (n->page_size - 1))) {
823                 trace_pci_nvme_err_invalid_prp2_align(prp2);
824                 status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
825                 goto unmap;
826             }
827             status = nvme_map_addr(n, sg, prp2, len);
828             if (status) {
829                 goto unmap;
830             }
831         }
832     }
833 
834     return NVME_SUCCESS;
835 
836 unmap:
837     nvme_sg_unmap(sg);
838     return status;
839 }
840 
841 /*
842  * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
843  * number of bytes mapped from *len.
844  */
845 static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
846                                   NvmeSglDescriptor *segment, uint64_t nsgld,
847                                   size_t *len, NvmeCmd *cmd)
848 {
849     dma_addr_t addr, trans_len;
850     uint32_t dlen;
851     uint16_t status;
852 
853     for (int i = 0; i < nsgld; i++) {
854         uint8_t type = NVME_SGL_TYPE(segment[i].type);
855 
856         switch (type) {
857         case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
858             break;
859         case NVME_SGL_DESCR_TYPE_SEGMENT:
860         case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
861             return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR;
862         default:
863             return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR;
864         }
865 
866         dlen = le32_to_cpu(segment[i].len);
867 
868         if (!dlen) {
869             continue;
870         }
871 
872         if (*len == 0) {
873             /*
874              * All data has been mapped, but the SGL contains additional
875              * segments and/or descriptors. The controller might accept
876              * ignoring the rest of the SGL.
877              */
878             uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls);
879             if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) {
880                 break;
881             }
882 
883             trace_pci_nvme_err_invalid_sgl_excess_length(dlen);
884             return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
885         }
886 
887         trans_len = MIN(*len, dlen);
888 
889         addr = le64_to_cpu(segment[i].addr);
890 
891         if (UINT64_MAX - addr < dlen) {
892             return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
893         }
894 
895         status = nvme_map_addr(n, sg, addr, trans_len);
896         if (status) {
897             return status;
898         }
899 
900         *len -= trans_len;
901     }
902 
903     return NVME_SUCCESS;
904 }
905 
906 static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
907                              size_t len, NvmeCmd *cmd)
908 {
909     /*
910      * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
911      * dynamically allocating a potentially huge SGL. The spec allows the SGL
912      * to be larger (as in number of bytes required to describe the SGL
913      * descriptors and segment chain) than the command transfer size, so it is
914      * not bounded by MDTS.
915      */
916     const int SEG_CHUNK_SIZE = 256;
917 
918     NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld;
919     uint64_t nsgld;
920     uint32_t seg_len;
921     uint16_t status;
922     hwaddr addr;
923     int ret;
924 
925     sgld = &sgl;
926     addr = le64_to_cpu(sgl.addr);
927 
928     trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl.type), len);
929 
930     nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr));
931 
932     /*
933      * If the entire transfer can be described with a single data block it can
934      * be mapped directly.
935      */
936     if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
937         status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd);
938         if (status) {
939             goto unmap;
940         }
941 
942         goto out;
943     }
944 
945     for (;;) {
946         switch (NVME_SGL_TYPE(sgld->type)) {
947         case NVME_SGL_DESCR_TYPE_SEGMENT:
948         case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
949             break;
950         default:
951             return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
952         }
953 
954         seg_len = le32_to_cpu(sgld->len);
955 
956         /* check the length of the (Last) Segment descriptor */
957         if (!seg_len || seg_len & 0xf) {
958             return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
959         }
960 
961         if (UINT64_MAX - addr < seg_len) {
962             return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
963         }
964 
965         nsgld = seg_len / sizeof(NvmeSglDescriptor);
966 
967         while (nsgld > SEG_CHUNK_SIZE) {
968             if (nvme_addr_read(n, addr, segment, sizeof(segment))) {
969                 trace_pci_nvme_err_addr_read(addr);
970                 status = NVME_DATA_TRAS_ERROR;
971                 goto unmap;
972             }
973 
974             status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE,
975                                        &len, cmd);
976             if (status) {
977                 goto unmap;
978             }
979 
980             nsgld -= SEG_CHUNK_SIZE;
981             addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor);
982         }
983 
984         ret = nvme_addr_read(n, addr, segment, nsgld *
985                              sizeof(NvmeSglDescriptor));
986         if (ret) {
987             trace_pci_nvme_err_addr_read(addr);
988             status = NVME_DATA_TRAS_ERROR;
989             goto unmap;
990         }
991 
992         last_sgld = &segment[nsgld - 1];
993 
994         /*
995          * If the segment ends with a Data Block, then we are done.
996          */
997         if (NVME_SGL_TYPE(last_sgld->type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
998             status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd);
999             if (status) {
1000                 goto unmap;
1001             }
1002 
1003             goto out;
1004         }
1005 
1006         /*
1007          * If the last descriptor was not a Data Block, then the current
1008          * segment must not be a Last Segment.
1009          */
1010         if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) {
1011             status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
1012             goto unmap;
1013         }
1014 
1015         sgld = last_sgld;
1016         addr = le64_to_cpu(sgld->addr);
1017 
1018         /*
1019          * Do not map the last descriptor; it will be a Segment or Last Segment
1020          * descriptor and is handled by the next iteration.
1021          */
1022         status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd);
1023         if (status) {
1024             goto unmap;
1025         }
1026     }
1027 
1028 out:
1029     /* if there is any residual left in len, the SGL was too short */
1030     if (len) {
1031         status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
1032         goto unmap;
1033     }
1034 
1035     return NVME_SUCCESS;
1036 
1037 unmap:
1038     nvme_sg_unmap(sg);
1039     return status;
1040 }
1041 
1042 uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
1043                        NvmeCmd *cmd)
1044 {
1045     uint64_t prp1, prp2;
1046 
1047     switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) {
1048     case NVME_PSDT_PRP:
1049         prp1 = le64_to_cpu(cmd->dptr.prp1);
1050         prp2 = le64_to_cpu(cmd->dptr.prp2);
1051 
1052         return nvme_map_prp(n, sg, prp1, prp2, len);
1053     case NVME_PSDT_SGL_MPTR_CONTIGUOUS:
1054     case NVME_PSDT_SGL_MPTR_SGL:
1055         return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd);
1056     default:
1057         return NVME_INVALID_FIELD;
1058     }
1059 }
1060 
1061 static uint16_t nvme_map_mptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
1062                               NvmeCmd *cmd)
1063 {
1064     int psdt = NVME_CMD_FLAGS_PSDT(cmd->flags);
1065     hwaddr mptr = le64_to_cpu(cmd->mptr);
1066     uint16_t status;
1067 
1068     if (psdt == NVME_PSDT_SGL_MPTR_SGL) {
1069         NvmeSglDescriptor sgl;
1070 
1071         if (nvme_addr_read(n, mptr, &sgl, sizeof(sgl))) {
1072             return NVME_DATA_TRAS_ERROR;
1073         }
1074 
1075         status = nvme_map_sgl(n, sg, sgl, len, cmd);
1076         if (status && (status & 0x7ff) == NVME_DATA_SGL_LEN_INVALID) {
1077             status = NVME_MD_SGL_LEN_INVALID | NVME_DNR;
1078         }
1079 
1080         return status;
1081     }
1082 
1083     nvme_sg_init(n, sg, nvme_addr_is_dma(n, mptr));
1084     status = nvme_map_addr(n, sg, mptr, len);
1085     if (status) {
1086         nvme_sg_unmap(sg);
1087     }
1088 
1089     return status;
1090 }
1091 
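/*
 * Map the logical block data of a transfer of `nlb` blocks as described by
 * the command's DPTR. For namespaces formatted with extended LBAs the host
 * buffer interleaves data and metadata, so the full buffer is mapped and then
 * split such that req->sg covers only the data bytes. The exception is when
 * the metadata consists solely of protection information and PRACT is set, in
 * which case the controller inserts/strips the PI and the host buffer holds
 * data only.
 */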
1092 static uint16_t nvme_map_data(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req)
1093 {
1094     NvmeNamespace *ns = req->ns;
1095     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1096     bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps);
1097     bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT);
1098     size_t len = nvme_l2b(ns, nlb);
1099     uint16_t status;
1100 
1101     if (nvme_ns_ext(ns) &&
1102         !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) {
1103         NvmeSg sg;
1104 
1105         len += nvme_m2b(ns, nlb);
1106 
1107         status = nvme_map_dptr(n, &sg, len, &req->cmd);
1108         if (status) {
1109             return status;
1110         }
1111 
1112         nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA);
1113         nvme_sg_split(&sg, ns, &req->sg, NULL);
1114         nvme_sg_unmap(&sg);
1115 
1116         return NVME_SUCCESS;
1117     }
1118 
1119     return nvme_map_dptr(n, &req->sg, len, &req->cmd);
1120 }
1121 
1122 static uint16_t nvme_map_mdata(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req)
1123 {
1124     NvmeNamespace *ns = req->ns;
1125     size_t len = nvme_m2b(ns, nlb);
1126     uint16_t status;
1127 
1128     if (nvme_ns_ext(ns)) {
1129         NvmeSg sg;
1130 
1131         len += nvme_l2b(ns, nlb);
1132 
1133         status = nvme_map_dptr(n, &sg, len, &req->cmd);
1134         if (status) {
1135             return status;
1136         }
1137 
1138         nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA);
1139         nvme_sg_split(&sg, ns, NULL, &req->sg);
1140         nvme_sg_unmap(&sg);
1141 
1142         return NVME_SUCCESS;
1143     }
1144 
1145     return nvme_map_mptr(n, &req->sg, len, &req->cmd);
1146 }
1147 
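/*
 * Transfer `len` bytes between `ptr` and the interleaved (extended LBA)
 * buffer described by `sg`, copying chunks of `bytes` bytes and skipping
 * `skip_bytes` between them, starting at `offset`. This picks out either the
 * data or the metadata portion of the buffer during a bounce transfer.
 */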
1148 static uint16_t nvme_tx_interleaved(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr,
1149                                     uint32_t len, uint32_t bytes,
1150                                     int32_t skip_bytes, int64_t offset,
1151                                     NvmeTxDirection dir)
1152 {
1153     hwaddr addr;
1154     uint32_t trans_len, count = bytes;
1155     bool dma = sg->flags & NVME_SG_DMA;
1156     int64_t sge_len;
1157     int sg_idx = 0;
1158     int ret;
1159 
1160     assert(sg->flags & NVME_SG_ALLOC);
1161 
1162     while (len) {
1163         sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len;
1164 
1165         if (sge_len - offset < 0) {
1166             offset -= sge_len;
1167             sg_idx++;
1168             continue;
1169         }
1170 
1171         if (sge_len == offset) {
1172             offset = 0;
1173             sg_idx++;
1174             continue;
1175         }
1176 
1177         trans_len = MIN(len, count);
1178         trans_len = MIN(trans_len, sge_len - offset);
1179 
1180         if (dma) {
1181             addr = sg->qsg.sg[sg_idx].base + offset;
1182         } else {
1183             addr = (hwaddr)(uintptr_t)sg->iov.iov[sg_idx].iov_base + offset;
1184         }
1185 
1186         if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
1187             ret = nvme_addr_read(n, addr, ptr, trans_len);
1188         } else {
1189             ret = nvme_addr_write(n, addr, ptr, trans_len);
1190         }
1191 
1192         if (ret) {
1193             return NVME_DATA_TRAS_ERROR;
1194         }
1195 
1196         ptr += trans_len;
1197         len -= trans_len;
1198         count -= trans_len;
1199         offset += trans_len;
1200 
1201         if (count == 0) {
1202             count = bytes;
1203             offset += skip_bytes;
1204         }
1205     }
1206 
1207     return NVME_SUCCESS;
1208 }
1209 
1210 static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, void *ptr, uint32_t len,
1211                         NvmeTxDirection dir)
1212 {
1213     assert(sg->flags & NVME_SG_ALLOC);
1214 
1215     if (sg->flags & NVME_SG_DMA) {
1216         const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
1217         dma_addr_t residual;
1218 
1219         if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
1220             dma_buf_write(ptr, len, &residual, &sg->qsg, attrs);
1221         } else {
1222             dma_buf_read(ptr, len, &residual, &sg->qsg, attrs);
1223         }
1224 
1225         if (unlikely(residual)) {
1226             trace_pci_nvme_err_invalid_dma();
1227             return NVME_INVALID_FIELD | NVME_DNR;
1228         }
1229     } else {
1230         size_t bytes;
1231 
1232         if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
1233             bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len);
1234         } else {
1235             bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len);
1236         }
1237 
1238         if (unlikely(bytes != len)) {
1239             trace_pci_nvme_err_invalid_dma();
1240             return NVME_INVALID_FIELD | NVME_DNR;
1241         }
1242     }
1243 
1244     return NVME_SUCCESS;
1245 }
1246 
1247 static inline uint16_t nvme_c2h(NvmeCtrl *n, void *ptr, uint32_t len,
1248                                 NvmeRequest *req)
1249 {
1250     uint16_t status;
1251 
1252     status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
1253     if (status) {
1254         return status;
1255     }
1256 
1257     return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE);
1258 }
1259 
1260 static inline uint16_t nvme_h2c(NvmeCtrl *n, void *ptr, uint32_t len,
1261                                 NvmeRequest *req)
1262 {
1263     uint16_t status;
1264 
1265     status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
1266     if (status) {
1267         return status;
1268     }
1269 
1270     return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE);
1271 }
1272 
1273 uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len,
1274                           NvmeTxDirection dir, NvmeRequest *req)
1275 {
1276     NvmeNamespace *ns = req->ns;
1277     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1278     bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps);
1279     bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT);
1280 
1281     if (nvme_ns_ext(ns) &&
1282         !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) {
1283         return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbasz,
1284                                    ns->lbaf.ms, 0, dir);
1285     }
1286 
1287     return nvme_tx(n, &req->sg, ptr, len, dir);
1288 }
1289 
1290 uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len,
1291                            NvmeTxDirection dir, NvmeRequest *req)
1292 {
1293     NvmeNamespace *ns = req->ns;
1294     uint16_t status;
1295 
1296     if (nvme_ns_ext(ns)) {
1297         return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbaf.ms,
1298                                    ns->lbasz, ns->lbasz, dir);
1299     }
1300 
1301     nvme_sg_unmap(&req->sg);
1302 
1303     status = nvme_map_mptr(n, &req->sg, len, &req->cmd);
1304     if (status) {
1305         return status;
1306     }
1307 
1308     return nvme_tx(n, &req->sg, ptr, len, dir);
1309 }
1310 
1311 static inline void nvme_blk_read(BlockBackend *blk, int64_t offset,
1312                                  BlockCompletionFunc *cb, NvmeRequest *req)
1313 {
1314     assert(req->sg.flags & NVME_SG_ALLOC);
1315 
1316     if (req->sg.flags & NVME_SG_DMA) {
1317         req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
1318                                   cb, req);
1319     } else {
1320         req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req);
1321     }
1322 }
1323 
1324 static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
1325                                   BlockCompletionFunc *cb, NvmeRequest *req)
1326 {
1327     assert(req->sg.flags & NVME_SG_ALLOC);
1328 
1329     if (req->sg.flags & NVME_SG_DMA) {
1330         req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
1331                                    cb, req);
1332     } else {
1333         req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req);
1334     }
1335 }
1336 
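/*
 * Shadow doorbell (Doorbell Buffer Config) support: the host writes queue
 * doorbell values to a shadow buffer instead of the MMIO doorbell registers,
 * and the controller publishes an event index to tell the host when an MMIO
 * write is needed again. The two helpers below write the controller's current
 * CQ head as the event index and re-read the CQ head from the shadow buffer.
 */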
1337 static void nvme_update_cq_eventidx(const NvmeCQueue *cq)
1338 {
1339     uint32_t v = cpu_to_le32(cq->head);
1340 
1341     trace_pci_nvme_update_cq_eventidx(cq->cqid, cq->head);
1342 
1343     pci_dma_write(PCI_DEVICE(cq->ctrl), cq->ei_addr, &v, sizeof(v));
1344 }
1345 
1346 static void nvme_update_cq_head(NvmeCQueue *cq)
1347 {
1348     uint32_t v;
1349 
1350     pci_dma_read(PCI_DEVICE(cq->ctrl), cq->db_addr, &v, sizeof(v));
1351 
1352     cq->head = le32_to_cpu(v);
1353 
1354     trace_pci_nvme_update_cq_head(cq->cqid, cq->head);
1355 }
1356 
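/*
 * Bottom half that posts completions: for each finished request on the CQ's
 * req_list, write its CQE to the host completion queue at the current tail
 * (with the phase tag), recycle the request onto its submission queue's free
 * list and, if any entries were posted, raise an interrupt.
 */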
1357 static void nvme_post_cqes(void *opaque)
1358 {
1359     NvmeCQueue *cq = opaque;
1360     NvmeCtrl *n = cq->ctrl;
1361     NvmeRequest *req, *next;
1362     bool pending = cq->head != cq->tail;
1363     int ret;
1364 
1365     QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
1366         NvmeSQueue *sq;
1367         hwaddr addr;
1368 
1369         if (n->dbbuf_enabled) {
1370             nvme_update_cq_eventidx(cq);
1371             nvme_update_cq_head(cq);
1372         }
1373 
1374         if (nvme_cq_full(cq)) {
1375             break;
1376         }
1377 
1378         sq = req->sq;
1379         req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
1380         req->cqe.sq_id = cpu_to_le16(sq->sqid);
1381         req->cqe.sq_head = cpu_to_le16(sq->head);
1382         addr = cq->dma_addr + cq->tail * n->cqe_size;
1383         ret = pci_dma_write(PCI_DEVICE(n), addr, (void *)&req->cqe,
1384                             sizeof(req->cqe));
1385         if (ret) {
1386             trace_pci_nvme_err_addr_write(addr);
1387             trace_pci_nvme_err_cfs();
1388             stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
1389             break;
1390         }
1391         QTAILQ_REMOVE(&cq->req_list, req, entry);
1392         nvme_inc_cq_tail(cq);
1393         nvme_sg_unmap(&req->sg);
1394         QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
1395     }
1396     if (cq->tail != cq->head) {
1397         if (cq->irq_enabled && !pending) {
1398             n->cq_pending++;
1399         }
1400 
1401         nvme_irq_assert(n, cq);
1402     }
1403 }
1404 
1405 static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
1406 {
1407     assert(cq->cqid == req->sq->cqid);
1408     trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid,
1409                                           le32_to_cpu(req->cqe.result),
1410                                           le32_to_cpu(req->cqe.dw1),
1411                                           req->status);
1412 
1413     if (req->status) {
1414         trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns),
1415                                       req->status, req->cmd.opcode);
1416     }
1417 
1418     QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
1419     QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
1420 
1421     qemu_bh_schedule(cq->bh);
1422 }
1423 
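/*
 * Post queued asynchronous events as completions. An event can only be posted
 * while the host has an Asynchronous Event Request command outstanding, and
 * events of a type that is currently masked (a CQE was posted but the host
 * has not yet cleared the event by reading the associated log page) remain
 * queued.
 */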
1424 static void nvme_process_aers(void *opaque)
1425 {
1426     NvmeCtrl *n = opaque;
1427     NvmeAsyncEvent *event, *next;
1428 
1429     trace_pci_nvme_process_aers(n->aer_queued);
1430 
1431     QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
1432         NvmeRequest *req;
1433         NvmeAerResult *result;
1434 
1435         /* can't post cqe if there is nothing to complete */
1436         if (!n->outstanding_aers) {
1437             trace_pci_nvme_no_outstanding_aers();
1438             break;
1439         }
1440 
1441         /* ignore if masked (cqe posted, but event not cleared) */
1442         if (n->aer_mask & (1 << event->result.event_type)) {
1443             trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask);
1444             continue;
1445         }
1446 
1447         QTAILQ_REMOVE(&n->aer_queue, event, entry);
1448         n->aer_queued--;
1449 
1450         n->aer_mask |= 1 << event->result.event_type;
1451         n->outstanding_aers--;
1452 
1453         req = n->aer_reqs[n->outstanding_aers];
1454 
1455         result = (NvmeAerResult *) &req->cqe.result;
1456         result->event_type = event->result.event_type;
1457         result->event_info = event->result.event_info;
1458         result->log_page = event->result.log_page;
1459         g_free(event);
1460 
1461         trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info,
1462                                     result->log_page);
1463 
1464         nvme_enqueue_req_completion(&n->admin_cq, req);
1465     }
1466 }
1467 
1468 static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
1469                                uint8_t event_info, uint8_t log_page)
1470 {
1471     NvmeAsyncEvent *event;
1472 
1473     trace_pci_nvme_enqueue_event(event_type, event_info, log_page);
1474 
1475     if (n->aer_queued == n->params.aer_max_queued) {
1476         trace_pci_nvme_enqueue_event_noqueue(n->aer_queued);
1477         return;
1478     }
1479 
1480     event = g_new(NvmeAsyncEvent, 1);
1481     event->result = (NvmeAerResult) {
1482         .event_type = event_type,
1483         .event_info = event_info,
1484         .log_page   = log_page,
1485     };
1486 
1487     QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry);
1488     n->aer_queued++;
1489 
1490     nvme_process_aers(n);
1491 }
1492 
1493 static void nvme_smart_event(NvmeCtrl *n, uint8_t event)
1494 {
1495     uint8_t aer_info;
1496 
1497     /* Ref SPEC <Asynchronous Event Information - SMART / Health Status> */
1498     if (!(NVME_AEC_SMART(n->features.async_config) & event)) {
1499         return;
1500     }
1501 
1502     switch (event) {
1503     case NVME_SMART_SPARE:
1504         aer_info = NVME_AER_INFO_SMART_SPARE_THRESH;
1505         break;
1506     case NVME_SMART_TEMPERATURE:
1507         aer_info = NVME_AER_INFO_SMART_TEMP_THRESH;
1508         break;
1509     case NVME_SMART_RELIABILITY:
1510     case NVME_SMART_MEDIA_READ_ONLY:
1511     case NVME_SMART_FAILED_VOLATILE_MEDIA:
1512     case NVME_SMART_PMR_UNRELIABLE:
1513         aer_info = NVME_AER_INFO_SMART_RELIABILITY;
1514         break;
1515     default:
1516         return;
1517     }
1518 
1519     nvme_enqueue_event(n, NVME_AER_TYPE_SMART, aer_info, NVME_LOG_SMART_INFO);
1520 }
1521 
1522 static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
1523 {
1524     n->aer_mask &= ~(1 << event_type);
1525     if (!QTAILQ_EMPTY(&n->aer_queue)) {
1526         nvme_process_aers(n);
1527     }
1528 }
1529 
1530 static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
1531 {
1532     uint8_t mdts = n->params.mdts;
1533 
1534     if (mdts && len > n->page_size << mdts) {
1535         trace_pci_nvme_err_mdts(len);
1536         return NVME_INVALID_FIELD | NVME_DNR;
1537     }
1538 
1539     return NVME_SUCCESS;
1540 }
1541 
1542 static inline uint16_t nvme_check_bounds(NvmeNamespace *ns, uint64_t slba,
1543                                          uint32_t nlb)
1544 {
1545     uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);
1546 
1547     if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
1548         trace_pci_nvme_err_invalid_lba_range(slba, nlb, nsze);
1549         return NVME_LBA_RANGE | NVME_DNR;
1550     }
1551 
1552     return NVME_SUCCESS;
1553 }
1554 
1555 static int nvme_block_status_all(NvmeNamespace *ns, uint64_t slba,
1556                                  uint32_t nlb, int flags)
1557 {
1558     BlockDriverState *bs = blk_bs(ns->blkconf.blk);
1559 
1560     int64_t pnum = 0, bytes = nvme_l2b(ns, nlb);
1561     int64_t offset = nvme_l2b(ns, slba);
1562     int ret;
1563 
1564     /*
1565      * `pnum` holds the number of bytes after offset that share the same
1566      * allocation status as the byte at offset. If `pnum` is different from
1567      * `bytes`, we should check the allocation status of the next range and
1568      * continue this until all bytes have been checked.
1569      */
1570     do {
1571         bytes -= pnum;
1572 
1573         ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL);
1574         if (ret < 0) {
1575             return ret;
1576         }
1577 
1578 
1579         trace_pci_nvme_block_status(offset, bytes, pnum, ret,
1580                                     !!(ret & BDRV_BLOCK_ZERO));
1581 
1582         if (!(ret & flags)) {
1583             return 1;
1584         }
1585 
1586         offset += pnum;
1587     } while (pnum != bytes);
1588 
1589     return 0;
1590 }
1591 
1592 static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba,
1593                                  uint32_t nlb)
1594 {
1595     int ret;
1596     Error *err = NULL;
1597 
1598     ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_DATA);
1599     if (ret) {
1600         if (ret < 0) {
1601             error_setg_errno(&err, -ret, "unable to get block status");
1602             error_report_err(err);
1603 
1604             return NVME_INTERNAL_DEV_ERROR;
1605         }
1606 
1607         return NVME_DULB;
1608     }
1609 
1610     return NVME_SUCCESS;
1611 }
1612 
1613 static void nvme_aio_err(NvmeRequest *req, int ret)
1614 {
1615     uint16_t status = NVME_SUCCESS;
1616     Error *local_err = NULL;
1617 
1618     switch (req->cmd.opcode) {
1619     case NVME_CMD_READ:
1620         status = NVME_UNRECOVERED_READ;
1621         break;
1622     case NVME_CMD_FLUSH:
1623     case NVME_CMD_WRITE:
1624     case NVME_CMD_WRITE_ZEROES:
1625     case NVME_CMD_ZONE_APPEND:
1626         status = NVME_WRITE_FAULT;
1627         break;
1628     default:
1629         status = NVME_INTERNAL_DEV_ERROR;
1630         break;
1631     }
1632 
1633     trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status);
1634 
1635     error_setg_errno(&local_err, -ret, "aio failed");
1636     error_report_err(local_err);
1637 
1638     /*
1639      * Set the command status code to the first encountered error but allow a
1640      * subsequent Internal Device Error to trump it.
1641      */
1642     if (req->status && status != NVME_INTERNAL_DEV_ERROR) {
1643         return;
1644     }
1645 
1646     req->status = status;
1647 }
1648 
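     /*
      * Map an LBA to its zone index; when the zone size is a power of two the
      * division reduces to a shift by zone_size_log2.
      */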
1649 static inline uint32_t nvme_zone_idx(NvmeNamespace *ns, uint64_t slba)
1650 {
1651     return ns->zone_size_log2 > 0 ? slba >> ns->zone_size_log2 :
1652                                     slba / ns->zone_size;
1653 }
1654 
1655 static inline NvmeZone *nvme_get_zone_by_slba(NvmeNamespace *ns, uint64_t slba)
1656 {
1657     uint32_t zone_idx = nvme_zone_idx(ns, slba);
1658 
1659     if (zone_idx >= ns->num_zones) {
1660         return NULL;
1661     }
1662 
1663     return &ns->zone_array[zone_idx];
1664 }
1665 
1666 static uint16_t nvme_check_zone_state_for_write(NvmeZone *zone)
1667 {
1668     uint64_t zslba = zone->d.zslba;
1669 
1670     switch (nvme_get_zone_state(zone)) {
1671     case NVME_ZONE_STATE_EMPTY:
1672     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1673     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1674     case NVME_ZONE_STATE_CLOSED:
1675         return NVME_SUCCESS;
1676     case NVME_ZONE_STATE_FULL:
1677         trace_pci_nvme_err_zone_is_full(zslba);
1678         return NVME_ZONE_FULL;
1679     case NVME_ZONE_STATE_OFFLINE:
1680         trace_pci_nvme_err_zone_is_offline(zslba);
1681         return NVME_ZONE_OFFLINE;
1682     case NVME_ZONE_STATE_READ_ONLY:
1683         trace_pci_nvme_err_zone_is_read_only(zslba);
1684         return NVME_ZONE_READ_ONLY;
1685     default:
1686         assert(false);
1687     }
1688 
1689     return NVME_INTERNAL_DEV_ERROR;
1690 }
1691 
1692 static uint16_t nvme_check_zone_write(NvmeNamespace *ns, NvmeZone *zone,
1693                                       uint64_t slba, uint32_t nlb)
1694 {
1695     uint64_t zcap = nvme_zone_wr_boundary(zone);
1696     uint16_t status;
1697 
1698     status = nvme_check_zone_state_for_write(zone);
1699     if (status) {
1700         return status;
1701     }
1702 
1703     if (zone->d.za & NVME_ZA_ZRWA_VALID) {
1704         uint64_t ezrwa = zone->w_ptr + 2 * ns->zns.zrwas;
1705 
1706         if (slba < zone->w_ptr || slba + nlb > ezrwa) {
1707             trace_pci_nvme_err_zone_invalid_write(slba, zone->w_ptr);
1708             return NVME_ZONE_INVALID_WRITE;
1709         }
1710     } else {
1711         if (unlikely(slba != zone->w_ptr)) {
1712             trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba,
1713                                                zone->w_ptr);
1714             return NVME_ZONE_INVALID_WRITE;
1715         }
1716     }
1717 
1718     if (unlikely((slba + nlb) > zcap)) {
1719         trace_pci_nvme_err_zone_boundary(slba, nlb, zcap);
1720         return NVME_ZONE_BOUNDARY_ERROR;
1721     }
1722 
1723     return NVME_SUCCESS;
1724 }
1725 
1726 static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone)
1727 {
1728     switch (nvme_get_zone_state(zone)) {
1729     case NVME_ZONE_STATE_EMPTY:
1730     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1731     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1732     case NVME_ZONE_STATE_FULL:
1733     case NVME_ZONE_STATE_CLOSED:
1734     case NVME_ZONE_STATE_READ_ONLY:
1735         return NVME_SUCCESS;
1736     case NVME_ZONE_STATE_OFFLINE:
1737         trace_pci_nvme_err_zone_is_offline(zone->d.zslba);
1738         return NVME_ZONE_OFFLINE;
1739     default:
1740         assert(false);
1741     }
1742 
1743     return NVME_INTERNAL_DEV_ERROR;
1744 }
1745 
1746 static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba,
1747                                      uint32_t nlb)
1748 {
1749     NvmeZone *zone;
1750     uint64_t bndry, end;
1751     uint16_t status;
1752 
1753     zone = nvme_get_zone_by_slba(ns, slba);
1754     assert(zone);
1755 
1756     bndry = nvme_zone_rd_boundary(ns, zone);
1757     end = slba + nlb;
1758 
1759     status = nvme_check_zone_state_for_read(zone);
1760     if (status) {
1761         ;
1762     } else if (unlikely(end > bndry)) {
1763         if (!ns->params.cross_zone_read) {
1764             status = NVME_ZONE_BOUNDARY_ERROR;
1765         } else {
1766             /*
1767              * Read across zone boundary - check that all subsequent
1768              * zones that are being read have an appropriate state.
1769              */
1770             do {
1771                 zone++;
1772                 status = nvme_check_zone_state_for_read(zone);
1773                 if (status) {
1774                     break;
1775                 }
1776             } while (end > nvme_zone_rd_boundary(ns, zone));
1777         }
1778     }
1779 
1780     return status;
1781 }
1782 
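     /*
      * Zone resource management helpers: the nvme_zrm_* functions implement
      * the zone state machine transitions (finish, close, reset, open),
      * updating the active/open resource counters and the ZRWA accounting
      * along the way.
      */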
1783 static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone)
1784 {
1785     switch (nvme_get_zone_state(zone)) {
1786     case NVME_ZONE_STATE_FULL:
1787         return NVME_SUCCESS;
1788 
1789     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1790     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1791         nvme_aor_dec_open(ns);
1792         /* fallthrough */
1793     case NVME_ZONE_STATE_CLOSED:
1794         nvme_aor_dec_active(ns);
1795 
1796         if (zone->d.za & NVME_ZA_ZRWA_VALID) {
1797             zone->d.za &= ~NVME_ZA_ZRWA_VALID;
1798             if (ns->params.numzrwa) {
1799                 ns->zns.numzrwa++;
1800             }
1801         }
1802 
1803         /* fallthrough */
1804     case NVME_ZONE_STATE_EMPTY:
1805         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL);
1806         return NVME_SUCCESS;
1807 
1808     default:
1809         return NVME_ZONE_INVAL_TRANSITION;
1810     }
1811 }
1812 
1813 static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone)
1814 {
1815     switch (nvme_get_zone_state(zone)) {
1816     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1817     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1818         nvme_aor_dec_open(ns);
1819         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
1820         /* fall through */
1821     case NVME_ZONE_STATE_CLOSED:
1822         return NVME_SUCCESS;
1823 
1824     default:
1825         return NVME_ZONE_INVAL_TRANSITION;
1826     }
1827 }
1828 
1829 static uint16_t nvme_zrm_reset(NvmeNamespace *ns, NvmeZone *zone)
1830 {
1831     switch (nvme_get_zone_state(zone)) {
1832     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1833     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1834         nvme_aor_dec_open(ns);
1835         /* fallthrough */
1836     case NVME_ZONE_STATE_CLOSED:
1837         nvme_aor_dec_active(ns);
1838 
1839         if (zone->d.za & NVME_ZA_ZRWA_VALID) {
1840             if (ns->params.numzrwa) {
1841                 ns->zns.numzrwa++;
1842             }
1843         }
1844 
1845         /* fallthrough */
1846     case NVME_ZONE_STATE_FULL:
1847         zone->w_ptr = zone->d.zslba;
1848         zone->d.wp = zone->w_ptr;
1849         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EMPTY);
1850         /* fallthrough */
1851     case NVME_ZONE_STATE_EMPTY:
1852         return NVME_SUCCESS;
1853 
1854     default:
1855         return NVME_ZONE_INVAL_TRANSITION;
1856     }
1857 }
1858 
1859 static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns)
1860 {
1861     NvmeZone *zone;
1862 
1863     if (ns->params.max_open_zones &&
1864         ns->nr_open_zones == ns->params.max_open_zones) {
1865         zone = QTAILQ_FIRST(&ns->imp_open_zones);
1866         if (zone) {
1867             /*
1868              * Automatically close this implicitly open zone.
1869              */
1870             QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
1871             nvme_zrm_close(ns, zone);
1872         }
1873     }
1874 }
1875 
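     /*
      * Flags for nvme_zrm_open_flags(): NVME_ZRM_AUTO marks an implicit
      * (write-triggered) open, NVME_ZRM_ZRWA additionally allocates a Zone
      * Random Write Area for the zone.
      */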
1876 enum {
1877     NVME_ZRM_AUTO = 1 << 0,
1878     NVME_ZRM_ZRWA = 1 << 1,
1879 };
1880 
1881 static uint16_t nvme_zrm_open_flags(NvmeCtrl *n, NvmeNamespace *ns,
1882                                     NvmeZone *zone, int flags)
1883 {
1884     int act = 0;
1885     uint16_t status;
1886 
1887     switch (nvme_get_zone_state(zone)) {
1888     case NVME_ZONE_STATE_EMPTY:
1889         act = 1;
1890 
1891         /* fallthrough */
1892 
1893     case NVME_ZONE_STATE_CLOSED:
1894         if (n->params.auto_transition_zones) {
1895             nvme_zrm_auto_transition_zone(ns);
1896         }
1897         status = nvme_zns_check_resources(ns, act, 1,
1898                                           (flags & NVME_ZRM_ZRWA) ? 1 : 0);
1899         if (status) {
1900             return status;
1901         }
1902 
1903         if (act) {
1904             nvme_aor_inc_active(ns);
1905         }
1906 
1907         nvme_aor_inc_open(ns);
1908 
1909         if (flags & NVME_ZRM_AUTO) {
1910             nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN);
1911             return NVME_SUCCESS;
1912         }
1913 
1914         /* fallthrough */
1915 
1916     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1917         if (flags & NVME_ZRM_AUTO) {
1918             return NVME_SUCCESS;
1919         }
1920 
1921         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN);
1922 
1923         /* fallthrough */
1924 
1925     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1926         if (flags & NVME_ZRM_ZRWA) {
1927             ns->zns.numzrwa--;
1928 
1929             zone->d.za |= NVME_ZA_ZRWA_VALID;
1930         }
1931 
1932         return NVME_SUCCESS;
1933 
1934     default:
1935         return NVME_ZONE_INVAL_TRANSITION;
1936     }
1937 }
1938 
1939 static inline uint16_t nvme_zrm_auto(NvmeCtrl *n, NvmeNamespace *ns,
1940                                      NvmeZone *zone)
1941 {
1942     return nvme_zrm_open_flags(n, ns, zone, NVME_ZRM_AUTO);
1943 }
1944 
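     /*
      * Advance the zone write pointer after a completed write; once the write
      * pointer reaches the writable zone boundary the zone is finished (i.e.
      * transitioned to the Full state).
      */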
1945 static void nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone,
1946                                  uint32_t nlb)
1947 {
1948     zone->d.wp += nlb;
1949 
1950     if (zone->d.wp == nvme_zone_wr_boundary(zone)) {
1951         nvme_zrm_finish(ns, zone);
1952     }
1953 }
1954 
1955 static void nvme_zoned_zrwa_implicit_flush(NvmeNamespace *ns, NvmeZone *zone,
1956                                            uint32_t nlbc)
1957 {
1958     uint16_t nzrwafgs = DIV_ROUND_UP(nlbc, ns->zns.zrwafg);
1959 
1960     nlbc = nzrwafgs * ns->zns.zrwafg;
1961 
1962     trace_pci_nvme_zoned_zrwa_implicit_flush(zone->d.zslba, nlbc);
1963 
1964     zone->w_ptr += nlbc;
1965 
1966     nvme_advance_zone_wp(ns, zone, nlbc);
1967 }
1968 
1969 static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req)
1970 {
1971     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1972     NvmeZone *zone;
1973     uint64_t slba;
1974     uint32_t nlb;
1975 
1976     slba = le64_to_cpu(rw->slba);
1977     nlb = le16_to_cpu(rw->nlb) + 1;
1978     zone = nvme_get_zone_by_slba(ns, slba);
1979     assert(zone);
1980 
1981     if (zone->d.za & NVME_ZA_ZRWA_VALID) {
1982         uint64_t ezrwa = zone->w_ptr + ns->zns.zrwas - 1;
1983         uint64_t elba = slba + nlb - 1;
1984 
1985         if (elba > ezrwa) {
1986             nvme_zoned_zrwa_implicit_flush(ns, zone, elba - ezrwa);
1987         }
1988 
1989         return;
1990     }
1991 
1992     nvme_advance_zone_wp(ns, zone, nlb);
1993 }
1994 
1995 static inline bool nvme_is_write(NvmeRequest *req)
1996 {
1997     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1998 
1999     return rw->opcode == NVME_CMD_WRITE ||
2000            rw->opcode == NVME_CMD_ZONE_APPEND ||
2001            rw->opcode == NVME_CMD_WRITE_ZEROES;
2002 }
2003 
2004 static AioContext *nvme_get_aio_context(BlockAIOCB *acb)
2005 {
2006     return qemu_get_aio_context();
2007 }
2008 
2009 static void nvme_misc_cb(void *opaque, int ret)
2010 {
2011     NvmeRequest *req = opaque;
2012 
2013     trace_pci_nvme_misc_cb(nvme_cid(req));
2014 
2015     if (ret) {
2016         nvme_aio_err(req, ret);
2017     }
2018 
2019     nvme_enqueue_req_completion(nvme_cq(req), req);
2020 }
2021 
2022 void nvme_rw_complete_cb(void *opaque, int ret)
2023 {
2024     NvmeRequest *req = opaque;
2025     NvmeNamespace *ns = req->ns;
2026     BlockBackend *blk = ns->blkconf.blk;
2027     BlockAcctCookie *acct = &req->acct;
2028     BlockAcctStats *stats = blk_get_stats(blk);
2029 
2030     trace_pci_nvme_rw_complete_cb(nvme_cid(req), blk_name(blk));
2031 
2032     if (ret) {
2033         block_acct_failed(stats, acct);
2034         nvme_aio_err(req, ret);
2035     } else {
2036         block_acct_done(stats, acct);
2037     }
2038 
2039     if (ns->params.zoned && nvme_is_write(req)) {
2040         nvme_finalize_zoned_write(ns, req);
2041     }
2042 
2043     nvme_enqueue_req_completion(nvme_cq(req), req);
2044 }
2045 
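     /*
      * Completion callback for the data portion of a read/write. If the
      * namespace carries metadata, a second transfer (or Write Zeroes) is
      * issued for the metadata area; the request finally completes through
      * nvme_rw_complete_cb().
      */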
2046 static void nvme_rw_cb(void *opaque, int ret)
2047 {
2048     NvmeRequest *req = opaque;
2049     NvmeNamespace *ns = req->ns;
2050 
2051     BlockBackend *blk = ns->blkconf.blk;
2052 
2053     trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk));
2054 
2055     if (ret) {
2056         goto out;
2057     }
2058 
2059     if (ns->lbaf.ms) {
2060         NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2061         uint64_t slba = le64_to_cpu(rw->slba);
2062         uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
2063         uint64_t offset = nvme_moff(ns, slba);
2064 
2065         if (req->cmd.opcode == NVME_CMD_WRITE_ZEROES) {
2066             size_t mlen = nvme_m2b(ns, nlb);
2067 
2068             req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen,
2069                                                BDRV_REQ_MAY_UNMAP,
2070                                                nvme_rw_complete_cb, req);
2071             return;
2072         }
2073 
2074         if (nvme_ns_ext(ns) || req->cmd.mptr) {
2075             uint16_t status;
2076 
2077             nvme_sg_unmap(&req->sg);
2078             status = nvme_map_mdata(nvme_ctrl(req), nlb, req);
2079             if (status) {
2080                 ret = -EFAULT;
2081                 goto out;
2082             }
2083 
2084             if (req->cmd.opcode == NVME_CMD_READ) {
2085                 return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req);
2086             }
2087 
2088             return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req);
2089         }
2090     }
2091 
2092 out:
2093     nvme_rw_complete_cb(req, ret);
2094 }
2095 
2096 static void nvme_verify_cb(void *opaque, int ret)
2097 {
2098     NvmeBounceContext *ctx = opaque;
2099     NvmeRequest *req = ctx->req;
2100     NvmeNamespace *ns = req->ns;
2101     BlockBackend *blk = ns->blkconf.blk;
2102     BlockAcctCookie *acct = &req->acct;
2103     BlockAcctStats *stats = blk_get_stats(blk);
2104     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2105     uint64_t slba = le64_to_cpu(rw->slba);
2106     uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
2107     uint16_t apptag = le16_to_cpu(rw->apptag);
2108     uint16_t appmask = le16_to_cpu(rw->appmask);
2109     uint64_t reftag = le32_to_cpu(rw->reftag);
2110     uint64_t cdw3 = le32_to_cpu(rw->cdw3);
2111     uint16_t status;
2112 
2113     reftag |= cdw3 << 32;
2114 
2115     trace_pci_nvme_verify_cb(nvme_cid(req), prinfo, apptag, appmask, reftag);
2116 
2117     if (ret) {
2118         block_acct_failed(stats, acct);
2119         nvme_aio_err(req, ret);
2120         goto out;
2121     }
2122 
2123     block_acct_done(stats, acct);
2124 
2125     if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2126         status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce,
2127                                        ctx->mdata.iov.size, slba);
2128         if (status) {
2129             req->status = status;
2130             goto out;
2131         }
2132 
2133         req->status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
2134                                      ctx->mdata.bounce, ctx->mdata.iov.size,
2135                                      prinfo, slba, apptag, appmask, &reftag);
2136     }
2137 
2138 out:
2139     qemu_iovec_destroy(&ctx->data.iov);
2140     g_free(ctx->data.bounce);
2141 
2142     qemu_iovec_destroy(&ctx->mdata.iov);
2143     g_free(ctx->mdata.bounce);
2144 
2145     g_free(ctx);
2146 
2147     nvme_enqueue_req_completion(nvme_cq(req), req);
2148 }
2149 
2151 static void nvme_verify_mdata_in_cb(void *opaque, int ret)
2152 {
2153     NvmeBounceContext *ctx = opaque;
2154     NvmeRequest *req = ctx->req;
2155     NvmeNamespace *ns = req->ns;
2156     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2157     uint64_t slba = le64_to_cpu(rw->slba);
2158     uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
2159     size_t mlen = nvme_m2b(ns, nlb);
2160     uint64_t offset = nvme_moff(ns, slba);
2161     BlockBackend *blk = ns->blkconf.blk;
2162 
2163     trace_pci_nvme_verify_mdata_in_cb(nvme_cid(req), blk_name(blk));
2164 
2165     if (ret) {
2166         goto out;
2167     }
2168 
2169     ctx->mdata.bounce = g_malloc(mlen);
2170 
2171     qemu_iovec_reset(&ctx->mdata.iov);
2172     qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);
2173 
2174     req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0,
2175                                 nvme_verify_cb, ctx);
2176     return;
2177 
2178 out:
2179     nvme_verify_cb(ctx, ret);
2180 }
2181 
2182 struct nvme_compare_ctx {
2183     struct {
2184         QEMUIOVector iov;
2185         uint8_t *bounce;
2186     } data;
2187 
2188     struct {
2189         QEMUIOVector iov;
2190         uint8_t *bounce;
2191     } mdata;
2192 };
2193 
2194 static void nvme_compare_mdata_cb(void *opaque, int ret)
2195 {
2196     NvmeRequest *req = opaque;
2197     NvmeNamespace *ns = req->ns;
2198     NvmeCtrl *n = nvme_ctrl(req);
2199     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2200     uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
2201     uint16_t apptag = le16_to_cpu(rw->apptag);
2202     uint16_t appmask = le16_to_cpu(rw->appmask);
2203     uint64_t reftag = le32_to_cpu(rw->reftag);
2204     uint64_t cdw3 = le32_to_cpu(rw->cdw3);
2205     struct nvme_compare_ctx *ctx = req->opaque;
2206     g_autofree uint8_t *buf = NULL;
2207     BlockBackend *blk = ns->blkconf.blk;
2208     BlockAcctCookie *acct = &req->acct;
2209     BlockAcctStats *stats = blk_get_stats(blk);
2210     uint16_t status = NVME_SUCCESS;
2211 
2212     reftag |= cdw3 << 32;
2213 
2214     trace_pci_nvme_compare_mdata_cb(nvme_cid(req));
2215 
2216     if (ret) {
2217         block_acct_failed(stats, acct);
2218         nvme_aio_err(req, ret);
2219         goto out;
2220     }
2221 
2222     buf = g_malloc(ctx->mdata.iov.size);
2223 
2224     status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size,
2225                                NVME_TX_DIRECTION_TO_DEVICE, req);
2226     if (status) {
2227         req->status = status;
2228         goto out;
2229     }
2230 
2231     if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2232         uint64_t slba = le64_to_cpu(rw->slba);
2233         uint8_t *bufp;
2234         uint8_t *mbufp = ctx->mdata.bounce;
2235         uint8_t *end = mbufp + ctx->mdata.iov.size;
2236         int16_t pil = 0;
2237 
2238         status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
2239                                 ctx->mdata.bounce, ctx->mdata.iov.size, prinfo,
2240                                 slba, apptag, appmask, &reftag);
2241         if (status) {
2242             req->status = status;
2243             goto out;
2244         }
2245 
2246         /*
2247          * When formatted with protection information, do not compare the DIF
2248          * tuple.
2249          */
2250         if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
2251             pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);
2252         }
2253 
2254         for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) {
2255             if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) {
2256                 req->status = NVME_CMP_FAILURE;
2257                 goto out;
2258             }
2259         }
2260 
2261         goto out;
2262     }
2263 
2264     if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) {
2265         req->status = NVME_CMP_FAILURE;
2266         goto out;
2267     }
2268 
2269     block_acct_done(stats, acct);
2270 
2271 out:
2272     qemu_iovec_destroy(&ctx->data.iov);
2273     g_free(ctx->data.bounce);
2274 
2275     qemu_iovec_destroy(&ctx->mdata.iov);
2276     g_free(ctx->mdata.bounce);
2277 
2278     g_free(ctx);
2279 
2280     nvme_enqueue_req_completion(nvme_cq(req), req);
2281 }
2282 
2283 static void nvme_compare_data_cb(void *opaque, int ret)
2284 {
2285     NvmeRequest *req = opaque;
2286     NvmeCtrl *n = nvme_ctrl(req);
2287     NvmeNamespace *ns = req->ns;
2288     BlockBackend *blk = ns->blkconf.blk;
2289     BlockAcctCookie *acct = &req->acct;
2290     BlockAcctStats *stats = blk_get_stats(blk);
2291 
2292     struct nvme_compare_ctx *ctx = req->opaque;
2293     g_autofree uint8_t *buf = NULL;
2294     uint16_t status;
2295 
2296     trace_pci_nvme_compare_data_cb(nvme_cid(req));
2297 
2298     if (ret) {
2299         block_acct_failed(stats, acct);
2300         nvme_aio_err(req, ret);
2301         goto out;
2302     }
2303 
2304     buf = g_malloc(ctx->data.iov.size);
2305 
2306     status = nvme_bounce_data(n, buf, ctx->data.iov.size,
2307                               NVME_TX_DIRECTION_TO_DEVICE, req);
2308     if (status) {
2309         req->status = status;
2310         goto out;
2311     }
2312 
2313     if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) {
2314         req->status = NVME_CMP_FAILURE;
2315         goto out;
2316     }
2317 
2318     if (ns->lbaf.ms) {
2319         NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2320         uint64_t slba = le64_to_cpu(rw->slba);
2321         uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
2322         size_t mlen = nvme_m2b(ns, nlb);
2323         uint64_t offset = nvme_moff(ns, slba);
2324 
2325         ctx->mdata.bounce = g_malloc(mlen);
2326 
2327         qemu_iovec_init(&ctx->mdata.iov, 1);
2328         qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);
2329 
2330         req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0,
2331                                     nvme_compare_mdata_cb, req);
2332         return;
2333     }
2334 
2335     block_acct_done(stats, acct);
2336 
2337 out:
2338     qemu_iovec_destroy(&ctx->data.iov);
2339     g_free(ctx->data.bounce);
2340     g_free(ctx);
2341 
2342     nvme_enqueue_req_completion(nvme_cq(req), req);
2343 }
2344 
2345 typedef struct NvmeDSMAIOCB {
2346     BlockAIOCB common;
2347     BlockAIOCB *aiocb;
2348     NvmeRequest *req;
2349     int ret;
2350 
2351     NvmeDsmRange *range;
2352     unsigned int nr;
2353     unsigned int idx;
2354 } NvmeDSMAIOCB;
2355 
2356 static void nvme_dsm_cancel(BlockAIOCB *aiocb)
2357 {
2358     NvmeDSMAIOCB *iocb = container_of(aiocb, NvmeDSMAIOCB, common);
2359 
2360     /* break nvme_dsm_cb loop */
2361     iocb->idx = iocb->nr;
2362     iocb->ret = -ECANCELED;
2363 
2364     if (iocb->aiocb) {
2365         blk_aio_cancel_async(iocb->aiocb);
2366         iocb->aiocb = NULL;
2367     } else {
2368         /*
2369          * We only reach this if nvme_dsm_cancel() has already been called or
2370          * the command ran to completion.
2371          */
2372         assert(iocb->idx == iocb->nr);
2373     }
2374 }
2375 
2376 static const AIOCBInfo nvme_dsm_aiocb_info = {
2377     .aiocb_size   = sizeof(NvmeDSMAIOCB),
2378     .cancel_async = nvme_dsm_cancel,
2379 };
2380 
2381 static void nvme_dsm_cb(void *opaque, int ret);
2382 
2383 static void nvme_dsm_md_cb(void *opaque, int ret)
2384 {
2385     NvmeDSMAIOCB *iocb = opaque;
2386     NvmeRequest *req = iocb->req;
2387     NvmeNamespace *ns = req->ns;
2388     NvmeDsmRange *range;
2389     uint64_t slba;
2390     uint32_t nlb;
2391 
2392     if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
2393         goto done;
2394     }
2395 
2396     range = &iocb->range[iocb->idx - 1];
2397     slba = le64_to_cpu(range->slba);
2398     nlb = le32_to_cpu(range->nlb);
2399 
2400     /*
2401      * Check that all blocks were discarded (zeroed); otherwise we do not zero
2402      * the metadata.
2403      */
2404 
2405     ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_ZERO);
2406     if (ret) {
2407         if (ret < 0) {
2408             goto done;
2409         }
2410 
2411         nvme_dsm_cb(iocb, 0);
2412         return;
2413     }
2414 
2415     iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_moff(ns, slba),
2416                                         nvme_m2b(ns, nlb), BDRV_REQ_MAY_UNMAP,
2417                                         nvme_dsm_cb, iocb);
2418     return;
2419 
2420 done:
2421     nvme_dsm_cb(iocb, ret);
2422 }
2423 
2424 static void nvme_dsm_cb(void *opaque, int ret)
2425 {
2426     NvmeDSMAIOCB *iocb = opaque;
2427     NvmeRequest *req = iocb->req;
2428     NvmeCtrl *n = nvme_ctrl(req);
2429     NvmeNamespace *ns = req->ns;
2430     NvmeDsmRange *range;
2431     uint64_t slba;
2432     uint32_t nlb;
2433 
2434     if (iocb->ret < 0) {
2435         goto done;
2436     } else if (ret < 0) {
2437         iocb->ret = ret;
2438         goto done;
2439     }
2440 
2441 next:
2442     if (iocb->idx == iocb->nr) {
2443         goto done;
2444     }
2445 
2446     range = &iocb->range[iocb->idx++];
2447     slba = le64_to_cpu(range->slba);
2448     nlb = le32_to_cpu(range->nlb);
2449 
2450     trace_pci_nvme_dsm_deallocate(slba, nlb);
2451 
2452     if (nlb > n->dmrsl) {
2453         trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl);
2454         goto next;
2455     }
2456 
2457     if (nvme_check_bounds(ns, slba, nlb)) {
2458         trace_pci_nvme_err_invalid_lba_range(slba, nlb,
2459                                              ns->id_ns.nsze);
2460         goto next;
2461     }
2462 
2463     iocb->aiocb = blk_aio_pdiscard(ns->blkconf.blk, nvme_l2b(ns, slba),
2464                                    nvme_l2b(ns, nlb),
2465                                    nvme_dsm_md_cb, iocb);
2466     return;
2467 
2468 done:
2469     iocb->aiocb = NULL;
2470     iocb->common.cb(iocb->common.opaque, iocb->ret);
2471     qemu_aio_unref(iocb);
2472 }
2473 
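     /*
      * Dataset Management: only the AD (deallocate) attribute is acted upon.
      * Ranges are discarded one at a time through nvme_dsm_cb(); if the
      * namespace has metadata, nvme_dsm_md_cb() zeroes the corresponding
      * metadata, but only when the discarded data reads back as zeroes.
      */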
2474 static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
2475 {
2476     NvmeNamespace *ns = req->ns;
2477     NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd;
2478     uint32_t attr = le32_to_cpu(dsm->attributes);
2479     uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1;
2480     uint16_t status = NVME_SUCCESS;
2481 
2482     trace_pci_nvme_dsm(nr, attr);
2483 
2484     if (attr & NVME_DSMGMT_AD) {
2485         NvmeDSMAIOCB *iocb = blk_aio_get(&nvme_dsm_aiocb_info, ns->blkconf.blk,
2486                                          nvme_misc_cb, req);
2487 
2488         iocb->req = req;
2489         iocb->ret = 0;
2490         iocb->range = g_new(NvmeDsmRange, nr);
2491         iocb->nr = nr;
2492         iocb->idx = 0;
2493 
2494         status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr,
2495                           req);
2496         if (status) {
2497             return status;
2498         }
2499 
2500         req->aiocb = &iocb->common;
2501         nvme_dsm_cb(iocb, 0);
2502 
2503         return NVME_NO_COMPLETE;
2504     }
2505 
2506     return status;
2507 }
2508 
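     /*
      * Verify: read the LBA range (and its metadata, if any) into bounce
      * buffers and run the end-to-end protection checks device-side; no data
      * is transferred to the host. The transfer size is limited by VSL rather
      * than MDTS.
      */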
2509 static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req)
2510 {
2511     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2512     NvmeNamespace *ns = req->ns;
2513     BlockBackend *blk = ns->blkconf.blk;
2514     uint64_t slba = le64_to_cpu(rw->slba);
2515     uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
2516     size_t len = nvme_l2b(ns, nlb);
2517     int64_t offset = nvme_l2b(ns, slba);
2518     uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
2519     uint32_t reftag = le32_to_cpu(rw->reftag);
2520     NvmeBounceContext *ctx = NULL;
2521     uint16_t status;
2522 
2523     trace_pci_nvme_verify(nvme_cid(req), nvme_nsid(ns), slba, nlb);
2524 
2525     if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2526         status = nvme_check_prinfo(ns, prinfo, slba, reftag);
2527         if (status) {
2528             return status;
2529         }
2530 
2531         if (prinfo & NVME_PRINFO_PRACT) {
2532             return NVME_INVALID_PROT_INFO | NVME_DNR;
2533         }
2534     }
2535 
2536     if (len > n->page_size << n->params.vsl) {
2537         return NVME_INVALID_FIELD | NVME_DNR;
2538     }
2539 
2540     status = nvme_check_bounds(ns, slba, nlb);
2541     if (status) {
2542         return status;
2543     }
2544 
2545     if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
2546         status = nvme_check_dulbe(ns, slba, nlb);
2547         if (status) {
2548             return status;
2549         }
2550     }
2551 
2552     ctx = g_new0(NvmeBounceContext, 1);
2553     ctx->req = req;
2554 
2555     ctx->data.bounce = g_malloc(len);
2556 
2557     qemu_iovec_init(&ctx->data.iov, 1);
2558     qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len);
2559 
2560     block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size,
2561                      BLOCK_ACCT_READ);
2562 
2563     req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0,
2564                                 nvme_verify_mdata_in_cb, ctx);
2565     return NVME_NO_COMPLETE;
2566 }
2567 
2568 typedef struct NvmeCopyAIOCB {
2569     BlockAIOCB common;
2570     BlockAIOCB *aiocb;
2571     NvmeRequest *req;
2572     int ret;
2573 
2574     void *ranges;
2575     unsigned int format;
2576     int nr;
2577     int idx;
2578 
2579     uint8_t *bounce;
2580     QEMUIOVector iov;
2581     struct {
2582         BlockAcctCookie read;
2583         BlockAcctCookie write;
2584     } acct;
2585 
2586     uint64_t reftag;
2587     uint64_t slba;
2588 
2589     NvmeZone *zone;
2590 } NvmeCopyAIOCB;
2591 
2592 static void nvme_copy_cancel(BlockAIOCB *aiocb)
2593 {
2594     NvmeCopyAIOCB *iocb = container_of(aiocb, NvmeCopyAIOCB, common);
2595 
2596     iocb->ret = -ECANCELED;
2597 
2598     if (iocb->aiocb) {
2599         blk_aio_cancel_async(iocb->aiocb);
2600         iocb->aiocb = NULL;
2601     }
2602 }
2603 
2604 static const AIOCBInfo nvme_copy_aiocb_info = {
2605     .aiocb_size   = sizeof(NvmeCopyAIOCB),
2606     .cancel_async = nvme_copy_cancel,
2607 };
2608 
2609 static void nvme_copy_done(NvmeCopyAIOCB *iocb)
2610 {
2611     NvmeRequest *req = iocb->req;
2612     NvmeNamespace *ns = req->ns;
2613     BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk);
2614 
2615     if (iocb->idx != iocb->nr) {
2616         req->cqe.result = cpu_to_le32(iocb->idx);
2617     }
2618 
2619     qemu_iovec_destroy(&iocb->iov);
2620     g_free(iocb->bounce);
2621 
2622     if (iocb->ret < 0) {
2623         block_acct_failed(stats, &iocb->acct.read);
2624         block_acct_failed(stats, &iocb->acct.write);
2625     } else {
2626         block_acct_done(stats, &iocb->acct.read);
2627         block_acct_done(stats, &iocb->acct.write);
2628     }
2629 
2630     iocb->common.cb(iocb->common.opaque, iocb->ret);
2631     qemu_aio_unref(iocb);
2632 }
2633 
2634 static void nvme_do_copy(NvmeCopyAIOCB *iocb);
2635 
2636 static void nvme_copy_source_range_parse_format0(void *ranges, int idx,
2637                                                  uint64_t *slba, uint32_t *nlb,
2638                                                  uint16_t *apptag,
2639                                                  uint16_t *appmask,
2640                                                  uint64_t *reftag)
2641 {
2642     NvmeCopySourceRangeFormat0 *_ranges = ranges;
2643 
2644     if (slba) {
2645         *slba = le64_to_cpu(_ranges[idx].slba);
2646     }
2647 
2648     if (nlb) {
2649         *nlb = le16_to_cpu(_ranges[idx].nlb) + 1;
2650     }
2651 
2652     if (apptag) {
2653         *apptag = le16_to_cpu(_ranges[idx].apptag);
2654     }
2655 
2656     if (appmask) {
2657         *appmask = le16_to_cpu(_ranges[idx].appmask);
2658     }
2659 
2660     if (reftag) {
2661         *reftag = le32_to_cpu(_ranges[idx].reftag);
2662     }
2663 }
2664 
2665 static void nvme_copy_source_range_parse_format1(void *ranges, int idx,
2666                                                  uint64_t *slba, uint32_t *nlb,
2667                                                  uint16_t *apptag,
2668                                                  uint16_t *appmask,
2669                                                  uint64_t *reftag)
2670 {
2671     NvmeCopySourceRangeFormat1 *_ranges = ranges;
2672 
2673     if (slba) {
2674         *slba = le64_to_cpu(_ranges[idx].slba);
2675     }
2676 
2677     if (nlb) {
2678         *nlb = le16_to_cpu(_ranges[idx].nlb) + 1;
2679     }
2680 
2681     if (apptag) {
2682         *apptag = le16_to_cpu(_ranges[idx].apptag);
2683     }
2684 
2685     if (appmask) {
2686         *appmask = le16_to_cpu(_ranges[idx].appmask);
2687     }
2688 
2689     if (reftag) {
2690         *reftag = 0;
2691 
2692         *reftag |= (uint64_t)_ranges[idx].sr[4] << 40;
2693         *reftag |= (uint64_t)_ranges[idx].sr[5] << 32;
2694         *reftag |= (uint64_t)_ranges[idx].sr[6] << 24;
2695         *reftag |= (uint64_t)_ranges[idx].sr[7] << 16;
2696         *reftag |= (uint64_t)_ranges[idx].sr[8] << 8;
2697         *reftag |= (uint64_t)_ranges[idx].sr[9];
2698     }
2699 }
2700 
2701 static void nvme_copy_source_range_parse(void *ranges, int idx, uint8_t format,
2702                                          uint64_t *slba, uint32_t *nlb,
2703                                          uint16_t *apptag, uint16_t *appmask,
2704                                          uint64_t *reftag)
2705 {
2706     switch (format) {
2707     case NVME_COPY_FORMAT_0:
2708         nvme_copy_source_range_parse_format0(ranges, idx, slba, nlb, apptag,
2709                                              appmask, reftag);
2710         break;
2711 
2712     case NVME_COPY_FORMAT_1:
2713         nvme_copy_source_range_parse_format1(ranges, idx, slba, nlb, apptag,
2714                                              appmask, reftag);
2715         break;
2716 
2717     default:
2718         abort();
2719     }
2720 }
2721 
2722 static void nvme_copy_out_completed_cb(void *opaque, int ret)
2723 {
2724     NvmeCopyAIOCB *iocb = opaque;
2725     NvmeRequest *req = iocb->req;
2726     NvmeNamespace *ns = req->ns;
2727     uint32_t nlb;
2728 
2729     nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL,
2730                                  &nlb, NULL, NULL, NULL);
2731 
2732     if (ret < 0) {
2733         iocb->ret = ret;
2734         goto out;
2735     } else if (iocb->ret < 0) {
2736         goto out;
2737     }
2738 
2739     if (ns->params.zoned) {
2740         nvme_advance_zone_wp(ns, iocb->zone, nlb);
2741     }
2742 
2743     iocb->idx++;
2744     iocb->slba += nlb;
2745 out:
2746     nvme_do_copy(iocb);
2747 }
2748 
2749 static void nvme_copy_out_cb(void *opaque, int ret)
2750 {
2751     NvmeCopyAIOCB *iocb = opaque;
2752     NvmeRequest *req = iocb->req;
2753     NvmeNamespace *ns = req->ns;
2754     uint32_t nlb;
2755     size_t mlen;
2756     uint8_t *mbounce;
2757 
2758     if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
2759         goto out;
2760     }
2761 
2762     nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL,
2763                                  &nlb, NULL, NULL, NULL);
2764 
2765     mlen = nvme_m2b(ns, nlb);
2766     mbounce = iocb->bounce + nvme_l2b(ns, nlb);
2767 
2768     qemu_iovec_reset(&iocb->iov);
2769     qemu_iovec_add(&iocb->iov, mbounce, mlen);
2770 
2771     iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_moff(ns, iocb->slba),
2772                                   &iocb->iov, 0, nvme_copy_out_completed_cb,
2773                                   iocb);
2774 
2775     return;
2776 
2777 out:
2778     nvme_copy_out_completed_cb(iocb, ret);
2779 }
2780 
2781 static void nvme_copy_in_completed_cb(void *opaque, int ret)
2782 {
2783     NvmeCopyAIOCB *iocb = opaque;
2784     NvmeRequest *req = iocb->req;
2785     NvmeNamespace *ns = req->ns;
2786     uint32_t nlb;
2787     uint64_t slba;
2788     uint16_t apptag, appmask;
2789     uint64_t reftag;
2790     size_t len;
2791     uint16_t status;
2792 
2793     if (ret < 0) {
2794         iocb->ret = ret;
2795         goto out;
2796     } else if (iocb->ret < 0) {
2797         goto out;
2798     }
2799 
2800     nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
2801                                  &nlb, &apptag, &appmask, &reftag);
2802     len = nvme_l2b(ns, nlb);
2803 
2804     trace_pci_nvme_copy_out(iocb->slba, nlb);
2805 
2806     if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2807         NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
2808 
2809         uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
2810         uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
2811 
2812         size_t mlen = nvme_m2b(ns, nlb);
2813         uint8_t *mbounce = iocb->bounce + nvme_l2b(ns, nlb);
2814 
2815         status = nvme_dif_mangle_mdata(ns, mbounce, mlen, slba);
2816         if (status) {
2817             goto invalid;
2818         }
2819         status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, prinfor,
2820                                 slba, apptag, appmask, &reftag);
2821         if (status) {
2822             goto invalid;
2823         }
2824 
2825         apptag = le16_to_cpu(copy->apptag);
2826         appmask = le16_to_cpu(copy->appmask);
2827 
2828         if (prinfow & NVME_PRINFO_PRACT) {
2829             status = nvme_check_prinfo(ns, prinfow, iocb->slba, iocb->reftag);
2830             if (status) {
2831                 goto invalid;
2832             }
2833 
2834             nvme_dif_pract_generate_dif(ns, iocb->bounce, len, mbounce, mlen,
2835                                         apptag, &iocb->reftag);
2836         } else {
2837             status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen,
2838                                     prinfow, iocb->slba, apptag, appmask,
2839                                     &iocb->reftag);
2840             if (status) {
2841                 goto invalid;
2842             }
2843         }
2844     }
2845 
2846     status = nvme_check_bounds(ns, iocb->slba, nlb);
2847     if (status) {
2848         goto invalid;
2849     }
2850 
2851     if (ns->params.zoned) {
2852         status = nvme_check_zone_write(ns, iocb->zone, iocb->slba, nlb);
2853         if (status) {
2854             goto invalid;
2855         }
2856 
2857         if (!(iocb->zone->d.za & NVME_ZA_ZRWA_VALID)) {
2858             iocb->zone->w_ptr += nlb;
2859         }
2860     }
2861 
2862     qemu_iovec_reset(&iocb->iov);
2863     qemu_iovec_add(&iocb->iov, iocb->bounce, len);
2864 
2865     iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, iocb->slba),
2866                                   &iocb->iov, 0, nvme_copy_out_cb, iocb);
2867 
2868     return;
2869 
2870 invalid:
2871     req->status = status;
2872     iocb->ret = -1;
2873 out:
2874     nvme_do_copy(iocb);
2875 }
2876 
2877 static void nvme_copy_in_cb(void *opaque, int ret)
2878 {
2879     NvmeCopyAIOCB *iocb = opaque;
2880     NvmeRequest *req = iocb->req;
2881     NvmeNamespace *ns = req->ns;
2882     uint64_t slba;
2883     uint32_t nlb;
2884 
2885     if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
2886         goto out;
2887     }
2888 
2889     nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
2890                                  &nlb, NULL, NULL, NULL);
2891 
2892     qemu_iovec_reset(&iocb->iov);
2893     qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(ns, nlb),
2894                    nvme_m2b(ns, nlb));
2895 
2896     iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_moff(ns, slba),
2897                                  &iocb->iov, 0, nvme_copy_in_completed_cb,
2898                                  iocb);
2899     return;
2900 
2901 out:
2902     nvme_copy_in_completed_cb(iocb, ret);
2903 }
2904 
2905 static void nvme_do_copy(NvmeCopyAIOCB *iocb)
2906 {
2907     NvmeRequest *req = iocb->req;
2908     NvmeNamespace *ns = req->ns;
2909     uint64_t slba;
2910     uint32_t nlb;
2911     size_t len;
2912     uint16_t status;
2913 
2914     if (iocb->ret < 0) {
2915         goto done;
2916     }
2917 
2918     if (iocb->idx == iocb->nr) {
2919         goto done;
2920     }
2921 
2922     nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
2923                                  &nlb, NULL, NULL, NULL);
2924     len = nvme_l2b(ns, nlb);
2925 
2926     trace_pci_nvme_copy_source_range(slba, nlb);
2927 
2928     if (nlb > le16_to_cpu(ns->id_ns.mssrl)) {
2929         status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
2930         goto invalid;
2931     }
2932 
2933     status = nvme_check_bounds(ns, slba, nlb);
2934     if (status) {
2935         goto invalid;
2936     }
2937 
2938     if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
2939         status = nvme_check_dulbe(ns, slba, nlb);
2940         if (status) {
2941             goto invalid;
2942         }
2943     }
2944 
2945     if (ns->params.zoned) {
2946         status = nvme_check_zone_read(ns, slba, nlb);
2947         if (status) {
2948             goto invalid;
2949         }
2950     }
2951 
2952     qemu_iovec_reset(&iocb->iov);
2953     qemu_iovec_add(&iocb->iov, iocb->bounce, len);
2954 
2955     iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_l2b(ns, slba),
2956                                  &iocb->iov, 0, nvme_copy_in_cb, iocb);
2957     return;
2958 
2959 invalid:
2960     req->status = status;
2961     iocb->ret = -1;
2962 done:
2963     nvme_copy_done(iocb);
2964 }
2965 
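     /*
      * Copy: each source range is processed by the callback chain
      * nvme_do_copy() (read data) -> nvme_copy_in_cb() (read metadata) ->
      * nvme_copy_in_completed_cb() (check protection info, write data) ->
      * nvme_copy_out_cb() (write metadata) -> nvme_copy_out_completed_cb()
      * (advance to the next range).
      */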
2966 static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
2967 {
2968     NvmeNamespace *ns = req->ns;
2969     NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
2970     NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk,
2971                                       nvme_misc_cb, req);
2972     uint16_t nr = copy->nr + 1;
2973     uint8_t format = copy->control[0] & 0xf;
2974     uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
2975     uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
2976     size_t len = sizeof(NvmeCopySourceRangeFormat0);
2977 
2978     uint16_t status;
2979 
2980     trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format);
2981 
2982     iocb->ranges = NULL;
2983     iocb->zone = NULL;
2984 
2985     if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) &&
2986         ((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) {
2987         status = NVME_INVALID_FIELD | NVME_DNR;
2988         goto invalid;
2989     }
2990 
2991     if (!(n->id_ctrl.ocfs & (1 << format))) {
2992         trace_pci_nvme_err_copy_invalid_format(format);
2993         status = NVME_INVALID_FIELD | NVME_DNR;
2994         goto invalid;
2995     }
2996 
2997     if (nr > ns->id_ns.msrc + 1) {
2998         status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
2999         goto invalid;
3000     }
3001 
3002     if ((ns->pif == 0x0 && format != 0x0) ||
3003         (ns->pif != 0x0 && format != 0x1)) {
3004         status = NVME_INVALID_FORMAT | NVME_DNR;
3005         goto invalid;
3006     }
3007 
3008     if (ns->pif) {
3009         len = sizeof(NvmeCopySourceRangeFormat1);
3010     }
3011 
3012     iocb->format = format;
3013     iocb->ranges = g_malloc_n(nr, len);
3014     status = nvme_h2c(n, (uint8_t *)iocb->ranges, len * nr, req);
3015     if (status) {
3016         goto invalid;
3017     }
3018 
3019     iocb->slba = le64_to_cpu(copy->sdlba);
3020 
3021     if (ns->params.zoned) {
3022         iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba);
3023         if (!iocb->zone) {
3024             status = NVME_LBA_RANGE | NVME_DNR;
3025             goto invalid;
3026         }
3027 
3028         status = nvme_zrm_auto(n, ns, iocb->zone);
3029         if (status) {
3030             goto invalid;
3031         }
3032     }
3033 
3034     iocb->req = req;
3035     iocb->ret = 0;
3036     iocb->nr = nr;
3037     iocb->idx = 0;
3038     iocb->reftag = le32_to_cpu(copy->reftag);
3039     iocb->reftag |= (uint64_t)le32_to_cpu(copy->cdw3) << 32;
3040     iocb->bounce = g_malloc_n(le16_to_cpu(ns->id_ns.mssrl),
3041                               ns->lbasz + ns->lbaf.ms);
3042 
3043     qemu_iovec_init(&iocb->iov, 1);
3044 
3045     block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.read, 0,
3046                      BLOCK_ACCT_READ);
3047     block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.write, 0,
3048                      BLOCK_ACCT_WRITE);
3049 
3050     req->aiocb = &iocb->common;
3051     nvme_do_copy(iocb);
3052 
3053     return NVME_NO_COMPLETE;
3054 
3055 invalid:
3056     g_free(iocb->ranges);
3057     qemu_aio_unref(iocb);
3058     return status;
3059 }
3060 
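     /*
      * Compare: read the LBA range into a bounce buffer and compare it with
      * the data supplied by the host (nvme_compare_data_cb); metadata, if
      * present, is compared separately in nvme_compare_mdata_cb().
      */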
3061 static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
3062 {
3063     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
3064     NvmeNamespace *ns = req->ns;
3065     BlockBackend *blk = ns->blkconf.blk;
3066     uint64_t slba = le64_to_cpu(rw->slba);
3067     uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
3068     uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
3069     size_t data_len = nvme_l2b(ns, nlb);
3070     size_t len = data_len;
3071     int64_t offset = nvme_l2b(ns, slba);
3072     struct nvme_compare_ctx *ctx = NULL;
3073     uint16_t status;
3074 
3075     trace_pci_nvme_compare(nvme_cid(req), nvme_nsid(ns), slba, nlb);
3076 
3077     if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (prinfo & NVME_PRINFO_PRACT)) {
3078         return NVME_INVALID_PROT_INFO | NVME_DNR;
3079     }
3080 
3081     if (nvme_ns_ext(ns)) {
3082         len += nvme_m2b(ns, nlb);
3083     }
3084 
3085     status = nvme_check_mdts(n, len);
3086     if (status) {
3087         return status;
3088     }
3089 
3090     status = nvme_check_bounds(ns, slba, nlb);
3091     if (status) {
3092         return status;
3093     }
3094 
3095     if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
3096         status = nvme_check_dulbe(ns, slba, nlb);
3097         if (status) {
3098             return status;
3099         }
3100     }
3101 
3102     status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
3103     if (status) {
3104         return status;
3105     }
3106 
3107     ctx = g_new(struct nvme_compare_ctx, 1);
3108     ctx->data.bounce = g_malloc(data_len);
3109 
3110     req->opaque = ctx;
3111 
3112     qemu_iovec_init(&ctx->data.iov, 1);
3113     qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len);
3114 
3115     block_acct_start(blk_get_stats(blk), &req->acct, data_len,
3116                      BLOCK_ACCT_READ);
3117     req->aiocb = blk_aio_preadv(blk, offset, &ctx->data.iov, 0,
3118                                 nvme_compare_data_cb, req);
3119 
3120     return NVME_NO_COMPLETE;
3121 }
3122 
3123 typedef struct NvmeFlushAIOCB {
3124     BlockAIOCB common;
3125     BlockAIOCB *aiocb;
3126     NvmeRequest *req;
3127     int ret;
3128 
3129     NvmeNamespace *ns;
3130     uint32_t nsid;
3131     bool broadcast;
3132 } NvmeFlushAIOCB;
3133 
3134 static void nvme_flush_cancel(BlockAIOCB *acb)
3135 {
3136     NvmeFlushAIOCB *iocb = container_of(acb, NvmeFlushAIOCB, common);
3137 
3138     iocb->ret = -ECANCELED;
3139 
3140     if (iocb->aiocb) {
3141         blk_aio_cancel_async(iocb->aiocb);
3142         iocb->aiocb = NULL;
3143     }
3144 }
3145 
3146 static const AIOCBInfo nvme_flush_aiocb_info = {
3147     .aiocb_size = sizeof(NvmeFlushAIOCB),
3148     .cancel_async = nvme_flush_cancel,
3149     .get_aio_context = nvme_get_aio_context,
3150 };
3151 
3152 static void nvme_do_flush(NvmeFlushAIOCB *iocb);
3153 
3154 static void nvme_flush_ns_cb(void *opaque, int ret)
3155 {
3156     NvmeFlushAIOCB *iocb = opaque;
3157     NvmeNamespace *ns = iocb->ns;
3158 
3159     if (ret < 0) {
3160         iocb->ret = ret;
3161         goto out;
3162     } else if (iocb->ret < 0) {
3163         goto out;
3164     }
3165 
3166     if (ns) {
3167         trace_pci_nvme_flush_ns(iocb->nsid);
3168 
3169         iocb->ns = NULL;
3170         iocb->aiocb = blk_aio_flush(ns->blkconf.blk, nvme_flush_ns_cb, iocb);
3171         return;
3172     }
3173 
3174 out:
3175     nvme_do_flush(iocb);
3176 }
3177 
3178 static void nvme_do_flush(NvmeFlushAIOCB *iocb)
3179 {
3180     NvmeRequest *req = iocb->req;
3181     NvmeCtrl *n = nvme_ctrl(req);
3182     int i;
3183 
3184     if (iocb->ret < 0) {
3185         goto done;
3186     }
3187 
3188     if (iocb->broadcast) {
3189         for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) {
3190             iocb->ns = nvme_ns(n, i);
3191             if (iocb->ns) {
3192                 iocb->nsid = i;
3193                 break;
3194             }
3195         }
3196     }
3197 
3198     if (!iocb->ns) {
3199         goto done;
3200     }
3201 
3202     nvme_flush_ns_cb(iocb, 0);
3203     return;
3204 
3205 done:
3206     iocb->common.cb(iocb->common.opaque, iocb->ret);
3207     qemu_aio_unref(iocb);
3208 }
3209 
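     /*
      * Flush: a broadcast NSID (0xffffffff) flushes every attached namespace
      * in turn; nvme_do_flush() and nvme_flush_ns_cb() alternate until all
      * namespaces have been flushed or an error occurs.
      */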
3210 static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
3211 {
3212     NvmeFlushAIOCB *iocb;
3213     uint32_t nsid = le32_to_cpu(req->cmd.nsid);
3214     uint16_t status;
3215 
3216     iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req);
3217 
3218     iocb->req = req;
3219     iocb->ret = 0;
3220     iocb->ns = NULL;
3221     iocb->nsid = 0;
3222     iocb->broadcast = (nsid == NVME_NSID_BROADCAST);
3223 
3224     if (!iocb->broadcast) {
3225         if (!nvme_nsid_valid(n, nsid)) {
3226             status = NVME_INVALID_NSID | NVME_DNR;
3227             goto out;
3228         }
3229 
3230         iocb->ns = nvme_ns(n, nsid);
3231         if (!iocb->ns) {
3232             status = NVME_INVALID_FIELD | NVME_DNR;
3233             goto out;
3234         }
3235 
3236         iocb->nsid = nsid;
3237     }
3238 
3239     req->aiocb = &iocb->common;
3240     nvme_do_flush(iocb);
3241 
3242     return NVME_NO_COMPLETE;
3243 
3244 out:
3245     qemu_aio_unref(iocb);
3246 
3247     return status;
3248 }
3249 
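     /*
      * Read: validate the request (MDTS, bounds, zone state, DULBE) and then
      * either hand off to the protection-information path (nvme_dif_rw) or
      * issue the block backend read directly.
      */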
3250 static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
3251 {
3252     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
3253     NvmeNamespace *ns = req->ns;
3254     uint64_t slba = le64_to_cpu(rw->slba);
3255     uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
3256     uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
3257     uint64_t data_size = nvme_l2b(ns, nlb);
3258     uint64_t mapped_size = data_size;
3259     uint64_t data_offset;
3260     BlockBackend *blk = ns->blkconf.blk;
3261     uint16_t status;
3262 
3263     if (nvme_ns_ext(ns)) {
3264         mapped_size += nvme_m2b(ns, nlb);
3265 
3266         if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
3267             bool pract = prinfo & NVME_PRINFO_PRACT;
3268 
3269             if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) {
3270                 mapped_size = data_size;
3271             }
3272         }
3273     }
3274 
3275     trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, mapped_size, slba);
3276 
3277     status = nvme_check_mdts(n, mapped_size);
3278     if (status) {
3279         goto invalid;
3280     }
3281 
3282     status = nvme_check_bounds(ns, slba, nlb);
3283     if (status) {
3284         goto invalid;
3285     }
3286 
3287     if (ns->params.zoned) {
3288         status = nvme_check_zone_read(ns, slba, nlb);
3289         if (status) {
3290             trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status);
3291             goto invalid;
3292         }
3293     }
3294 
3295     if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
3296         status = nvme_check_dulbe(ns, slba, nlb);
3297         if (status) {
3298             goto invalid;
3299         }
3300     }
3301 
3302     if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
3303         return nvme_dif_rw(n, req);
3304     }
3305 
3306     status = nvme_map_data(n, nlb, req);
3307     if (status) {
3308         goto invalid;
3309     }
3310 
3311     data_offset = nvme_l2b(ns, slba);
3312 
3313     block_acct_start(blk_get_stats(blk), &req->acct, data_size,
3314                      BLOCK_ACCT_READ);
3315     nvme_blk_read(blk, data_offset, nvme_rw_cb, req);
3316     return NVME_NO_COMPLETE;
3317 
3318 invalid:
3319     block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ);
3320     return status | NVME_DNR;
3321 }
3322 
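     /*
      * Common write path for Write, Write Zeroes (wrz) and Zone Append
      * (append). For appends the effective SLBA is the zone write pointer,
      * and with protection types 1 and 2 the initial reference tag may be
      * remapped relative to the zone start.
      */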
3323 static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
3324                               bool wrz)
3325 {
3326     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
3327     NvmeNamespace *ns = req->ns;
3328     uint64_t slba = le64_to_cpu(rw->slba);
3329     uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
3330     uint16_t ctrl = le16_to_cpu(rw->control);
3331     uint8_t prinfo = NVME_RW_PRINFO(ctrl);
3332     uint64_t data_size = nvme_l2b(ns, nlb);
3333     uint64_t mapped_size = data_size;
3334     uint64_t data_offset;
3335     NvmeZone *zone;
3336     NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe;
3337     BlockBackend *blk = ns->blkconf.blk;
3338     uint16_t status;
3339 
3340     if (nvme_ns_ext(ns)) {
3341         mapped_size += nvme_m2b(ns, nlb);
3342 
3343         if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
3344             bool pract = prinfo & NVME_PRINFO_PRACT;
3345 
3346             if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) {
3347                 mapped_size -= nvme_m2b(ns, nlb);
3348             }
3349         }
3350     }
3351 
3352     trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode),
3353                          nvme_nsid(ns), nlb, mapped_size, slba);
3354 
3355     if (!wrz) {
3356         status = nvme_check_mdts(n, mapped_size);
3357         if (status) {
3358             goto invalid;
3359         }
3360     }
3361 
3362     status = nvme_check_bounds(ns, slba, nlb);
3363     if (status) {
3364         goto invalid;
3365     }
3366 
3367     if (ns->params.zoned) {
3368         zone = nvme_get_zone_by_slba(ns, slba);
3369         assert(zone);
3370 
3371         if (append) {
3372             bool piremap = !!(ctrl & NVME_RW_PIREMAP);
3373 
3374             if (unlikely(zone->d.za & NVME_ZA_ZRWA_VALID)) {
3375                 return NVME_INVALID_ZONE_OP | NVME_DNR;
3376             }
3377 
3378             if (unlikely(slba != zone->d.zslba)) {
3379                 trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba);
3380                 status = NVME_INVALID_FIELD;
3381                 goto invalid;
3382             }
3383 
3384             if (n->params.zasl &&
3385                 data_size > (uint64_t)n->page_size << n->params.zasl) {
3386                 trace_pci_nvme_err_zasl(data_size);
3387                 return NVME_INVALID_FIELD | NVME_DNR;
3388             }
3389 
3390             slba = zone->w_ptr;
3391             rw->slba = cpu_to_le64(slba);
3392             res->slba = cpu_to_le64(slba);
3393 
3394             switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
3395             case NVME_ID_NS_DPS_TYPE_1:
3396                 if (!piremap) {
3397                     return NVME_INVALID_PROT_INFO | NVME_DNR;
3398                 }
3399 
3400                 /* fallthrough */
3401 
3402             case NVME_ID_NS_DPS_TYPE_2:
3403                 if (piremap) {
3404                     uint32_t reftag = le32_to_cpu(rw->reftag);
3405                     rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba));
3406                 }
3407 
3408                 break;
3409 
3410             case NVME_ID_NS_DPS_TYPE_3:
3411                 if (piremap) {
3412                     return NVME_INVALID_PROT_INFO | NVME_DNR;
3413                 }
3414 
3415                 break;
3416             }
3417         }
3418 
3419         status = nvme_check_zone_write(ns, zone, slba, nlb);
3420         if (status) {
3421             goto invalid;
3422         }
3423 
3424         status = nvme_zrm_auto(n, ns, zone);
3425         if (status) {
3426             goto invalid;
3427         }
3428 
3429         if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) {
3430             zone->w_ptr += nlb;
3431         }
3432     }
3433 
3434     data_offset = nvme_l2b(ns, slba);
3435 
3436     if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
3437         return nvme_dif_rw(n, req);
3438     }
3439 
3440     if (!wrz) {
3441         status = nvme_map_data(n, nlb, req);
3442         if (status) {
3443             goto invalid;
3444         }
3445 
3446         block_acct_start(blk_get_stats(blk), &req->acct, data_size,
3447                          BLOCK_ACCT_WRITE);
3448         nvme_blk_write(blk, data_offset, nvme_rw_cb, req);
3449     } else {
3450         req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size,
3451                                            BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
3452                                            req);
3453     }
3454 
3455     return NVME_NO_COMPLETE;
3456 
3457 invalid:
3458     block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE);
3459     return status | NVME_DNR;
3460 }
3461 
3462 static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req)
3463 {
3464     return nvme_do_write(n, req, false, false);
3465 }
3466 
3467 static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
3468 {
3469     return nvme_do_write(n, req, false, true);
3470 }
3471 
3472 static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req)
3473 {
3474     return nvme_do_write(n, req, true, false);
3475 }
3476 
3477 static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c,
3478                                             uint64_t *slba, uint32_t *zone_idx)
3479 {
3480     uint32_t dw10 = le32_to_cpu(c->cdw10);
3481     uint32_t dw11 = le32_to_cpu(c->cdw11);
3482 
3483     if (!ns->params.zoned) {
3484         trace_pci_nvme_err_invalid_opc(c->opcode);
3485         return NVME_INVALID_OPCODE | NVME_DNR;
3486     }
3487 
3488     *slba = ((uint64_t)dw11) << 32 | dw10;
3489     if (unlikely(*slba >= ns->id_ns.nsze)) {
3490         trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze);
3491         *slba = 0;
3492         return NVME_LBA_RANGE | NVME_DNR;
3493     }
3494 
3495     *zone_idx = nvme_zone_idx(ns, *slba);
3496     assert(*zone_idx < ns->num_zones);
3497 
3498     return NVME_SUCCESS;
3499 }
3500 
3501 typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState,
3502                                  NvmeRequest *);
3503 
3504 enum NvmeZoneProcessingMask {
3505     NVME_PROC_CURRENT_ZONE    = 0,
3506     NVME_PROC_OPENED_ZONES    = 1 << 0,
3507     NVME_PROC_CLOSED_ZONES    = 1 << 1,
3508     NVME_PROC_READ_ONLY_ZONES = 1 << 2,
3509     NVME_PROC_FULL_ZONES      = 1 << 3,
3510 };
3511 
3512 static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone,
3513                                NvmeZoneState state, NvmeRequest *req)
3514 {
3515     NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd;
3516     int flags = 0;
3517 
3518     if (cmd->zsflags & NVME_ZSFLAG_ZRWA_ALLOC) {
3519         uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs);
3520 
3521         if (!(ozcs & NVME_ID_NS_ZONED_OZCS_ZRWASUP)) {
3522             return NVME_INVALID_ZONE_OP | NVME_DNR;
3523         }
3524 
3525         if (zone->w_ptr % ns->zns.zrwafg) {
3526             return NVME_NOZRWA | NVME_DNR;
3527         }
3528 
3529         flags = NVME_ZRM_ZRWA;
3530     }
3531 
3532     return nvme_zrm_open_flags(nvme_ctrl(req), ns, zone, flags);
3533 }
3534 
3535 static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone,
3536                                 NvmeZoneState state, NvmeRequest *req)
3537 {
3538     return nvme_zrm_close(ns, zone);
3539 }
3540 
3541 static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone,
3542                                  NvmeZoneState state, NvmeRequest *req)
3543 {
3544     return nvme_zrm_finish(ns, zone);
3545 }
3546 
3547 static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone,
3548                                   NvmeZoneState state, NvmeRequest *req)
3549 {
3550     switch (state) {
3551     case NVME_ZONE_STATE_READ_ONLY:
3552         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE);
3553         /* fall through */
3554     case NVME_ZONE_STATE_OFFLINE:
3555         return NVME_SUCCESS;
3556     default:
3557         return NVME_ZONE_INVAL_TRANSITION;
3558     }
3559 }
3560 
3561 static uint16_t nvme_set_zd_ext(NvmeNamespace *ns, NvmeZone *zone)
3562 {
3563     uint16_t status;
3564     uint8_t state = nvme_get_zone_state(zone);
3565 
3566     if (state == NVME_ZONE_STATE_EMPTY) {
3567         status = nvme_aor_check(ns, 1, 0);
3568         if (status) {
3569             return status;
3570         }
3571         nvme_aor_inc_active(ns);
3572         zone->d.za |= NVME_ZA_ZD_EXT_VALID;
3573         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
3574         return NVME_SUCCESS;
3575     }
3576 
3577     return NVME_ZONE_INVAL_TRANSITION;
3578 }
3579 
3580 static uint16_t nvme_bulk_proc_zone(NvmeNamespace *ns, NvmeZone *zone,
3581                                     enum NvmeZoneProcessingMask proc_mask,
3582                                     op_handler_t op_hndlr, NvmeRequest *req)
3583 {
3584     uint16_t status = NVME_SUCCESS;
3585     NvmeZoneState zs = nvme_get_zone_state(zone);
3586     bool proc_zone;
3587 
3588     switch (zs) {
3589     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
3590     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
3591         proc_zone = proc_mask & NVME_PROC_OPENED_ZONES;
3592         break;
3593     case NVME_ZONE_STATE_CLOSED:
3594         proc_zone = proc_mask & NVME_PROC_CLOSED_ZONES;
3595         break;
3596     case NVME_ZONE_STATE_READ_ONLY:
3597         proc_zone = proc_mask & NVME_PROC_READ_ONLY_ZONES;
3598         break;
3599     case NVME_ZONE_STATE_FULL:
3600         proc_zone = proc_mask & NVME_PROC_FULL_ZONES;
3601         break;
3602     default:
3603         proc_zone = false;
3604     }
3605 
3606     if (proc_zone) {
3607         status = op_hndlr(ns, zone, zs, req);
3608     }
3609 
3610     return status;
3611 }
3612 
3613 static uint16_t nvme_do_zone_op(NvmeNamespace *ns, NvmeZone *zone,
3614                                 enum NvmeZoneProcessingMask proc_mask,
3615                                 op_handler_t op_hndlr, NvmeRequest *req)
3616 {
3617     NvmeZone *next;
3618     uint16_t status = NVME_SUCCESS;
3619     int i;
3620 
3621     if (!proc_mask) {
3622         status = op_hndlr(ns, zone, nvme_get_zone_state(zone), req);
3623     } else {
3624         if (proc_mask & NVME_PROC_CLOSED_ZONES) {
3625             QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) {
3626                 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3627                                              req);
3628                 if (status && status != NVME_NO_COMPLETE) {
3629                     goto out;
3630                 }
3631             }
3632         }
3633         if (proc_mask & NVME_PROC_OPENED_ZONES) {
3634             QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) {
3635                 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3636                                              req);
3637                 if (status && status != NVME_NO_COMPLETE) {
3638                     goto out;
3639                 }
3640             }
3641 
3642             QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) {
3643                 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3644                                              req);
3645                 if (status && status != NVME_NO_COMPLETE) {
3646                     goto out;
3647                 }
3648             }
3649         }
3650         if (proc_mask & NVME_PROC_FULL_ZONES) {
3651             QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) {
3652                 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3653                                              req);
3654                 if (status && status != NVME_NO_COMPLETE) {
3655                     goto out;
3656                 }
3657             }
3658         }
3659 
3660         if (proc_mask & NVME_PROC_READ_ONLY_ZONES) {
3661             for (i = 0; i < ns->num_zones; i++, zone++) {
3662                 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3663                                              req);
3664                 if (status && status != NVME_NO_COMPLETE) {
3665                     goto out;
3666                 }
3667             }
3668         }
3669     }
3670 
3671 out:
3672     return status;
3673 }
3674 
3675 typedef struct NvmeZoneResetAIOCB {
3676     BlockAIOCB common;
3677     BlockAIOCB *aiocb;
3678     NvmeRequest *req;
3679     int ret;
3680 
3681     bool all;
3682     int idx;
3683     NvmeZone *zone;
3684 } NvmeZoneResetAIOCB;
3685 
3686 static void nvme_zone_reset_cancel(BlockAIOCB *aiocb)
3687 {
3688     NvmeZoneResetAIOCB *iocb = container_of(aiocb, NvmeZoneResetAIOCB, common);
3689     NvmeRequest *req = iocb->req;
3690     NvmeNamespace *ns = req->ns;
3691 
3692     iocb->idx = ns->num_zones;
3693 
3694     iocb->ret = -ECANCELED;
3695 
3696     if (iocb->aiocb) {
3697         blk_aio_cancel_async(iocb->aiocb);
3698         iocb->aiocb = NULL;
3699     }
3700 }
3701 
3702 static const AIOCBInfo nvme_zone_reset_aiocb_info = {
3703     .aiocb_size = sizeof(NvmeZoneResetAIOCB),
3704     .cancel_async = nvme_zone_reset_cancel,
3705 };
3706 
3707 static void nvme_zone_reset_cb(void *opaque, int ret);
3708 
3709 static void nvme_zone_reset_epilogue_cb(void *opaque, int ret)
3710 {
3711     NvmeZoneResetAIOCB *iocb = opaque;
3712     NvmeRequest *req = iocb->req;
3713     NvmeNamespace *ns = req->ns;
3714     int64_t moff;
3715     int count;
3716 
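         /*
          * The data portion of the zone has been zeroed; if the LBA format
          * carries per-LBA metadata, zero the metadata region as well before
          * continuing with the next zone in nvme_zone_reset_cb().
          */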
3717     if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
3718         goto out;
3719     }
3720 
3721     moff = nvme_moff(ns, iocb->zone->d.zslba);
3722     count = nvme_m2b(ns, ns->zone_size);
3723 
3724     iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, moff, count,
3725                                         BDRV_REQ_MAY_UNMAP,
3726                                         nvme_zone_reset_cb, iocb);
3727     return;
3728 
3729 out:
3730     nvme_zone_reset_cb(iocb, ret);
3731 }
3732 
3733 static void nvme_zone_reset_cb(void *opaque, int ret)
3734 {
3735     NvmeZoneResetAIOCB *iocb = opaque;
3736     NvmeRequest *req = iocb->req;
3737     NvmeNamespace *ns = req->ns;
3738 
3739     if (iocb->ret < 0) {
3740         goto done;
3741     } else if (ret < 0) {
3742         iocb->ret = ret;
3743         goto done;
3744     }
3745 
3746     if (iocb->zone) {
3747         nvme_zrm_reset(ns, iocb->zone);
3748 
3749         if (!iocb->all) {
3750             goto done;
3751         }
3752     }
3753 
3754     while (iocb->idx < ns->num_zones) {
3755         NvmeZone *zone = &ns->zone_array[iocb->idx++];
3756 
3757         switch (nvme_get_zone_state(zone)) {
3758         case NVME_ZONE_STATE_EMPTY:
3759             if (!iocb->all) {
3760                 goto done;
3761             }
3762 
3763             continue;
3764 
3765         case NVME_ZONE_STATE_EXPLICITLY_OPEN:
3766         case NVME_ZONE_STATE_IMPLICITLY_OPEN:
3767         case NVME_ZONE_STATE_CLOSED:
3768         case NVME_ZONE_STATE_FULL:
3769             iocb->zone = zone;
3770             break;
3771 
3772         default:
3773             continue;
3774         }
3775 
3776         trace_pci_nvme_zns_zone_reset(zone->d.zslba);
3777 
3778         iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk,
3779                                             nvme_l2b(ns, zone->d.zslba),
3780                                             nvme_l2b(ns, ns->zone_size),
3781                                             BDRV_REQ_MAY_UNMAP,
3782                                             nvme_zone_reset_epilogue_cb,
3783                                             iocb);
3784         return;
3785     }
3786 
3787 done:
3788     iocb->aiocb = NULL;
3789 
3790     iocb->common.cb(iocb->common.opaque, iocb->ret);
3791     qemu_aio_unref(iocb);
3792 }
3793 
3794 static uint16_t nvme_zone_mgmt_send_zrwa_flush(NvmeCtrl *n, NvmeZone *zone,
3795                                                uint64_t elba, NvmeRequest *req)
3796 {
3797     NvmeNamespace *ns = req->ns;
3798     uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs);
3799     uint64_t wp = zone->d.wp;
3800     uint32_t nlb = elba - wp + 1;
3801     uint16_t status;
3802 
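         /*
          * A ZRWA flush commits the zone random write area from the write
          * pointer up to and including the given LBA. The zone must have a
          * valid ZRWA, the LBA must lie within [wp, wp + zrwas], and the
          * number of logical blocks flushed must be a multiple of the flush
          * granularity (zrwafg).
          */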
3804     if (!(ozcs & NVME_ID_NS_ZONED_OZCS_ZRWASUP)) {
3805         return NVME_INVALID_ZONE_OP | NVME_DNR;
3806     }
3807 
3808     if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) {
3809         return NVME_INVALID_FIELD | NVME_DNR;
3810     }
3811 
3812     if (elba < wp || elba > wp + ns->zns.zrwas) {
3813         return NVME_ZONE_BOUNDARY_ERROR | NVME_DNR;
3814     }
3815 
3816     if (nlb % ns->zns.zrwafg) {
3817         return NVME_INVALID_FIELD | NVME_DNR;
3818     }
3819 
3820     status = nvme_zrm_auto(n, ns, zone);
3821     if (status) {
3822         return status;
3823     }
3824 
3825     zone->w_ptr += nlb;
3826 
3827     nvme_advance_zone_wp(ns, zone, nlb);
3828 
3829     return NVME_SUCCESS;
3830 }
3831 
3832 static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
3833 {
3834     NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd;
3835     NvmeNamespace *ns = req->ns;
3836     NvmeZone *zone;
3837     NvmeZoneResetAIOCB *iocb;
3838     uint8_t *zd_ext;
3839     uint64_t slba = 0;
3840     uint32_t zone_idx = 0;
3841     uint16_t status;
3842     uint8_t action = cmd->zsa;
3843     bool all;
3844     enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE;
3845 
3846     all = cmd->zsflags & NVME_ZSFLAG_SELECT_ALL;
3847 
3848     req->status = NVME_SUCCESS;
3849 
3850     if (!all) {
3851         status = nvme_get_mgmt_zone_slba_idx(ns, &req->cmd, &slba, &zone_idx);
3852         if (status) {
3853             return status;
3854         }
3855     }
3856 
3857     zone = &ns->zone_array[zone_idx];
3858     if (slba != zone->d.zslba && action != NVME_ZONE_ACTION_ZRWA_FLUSH) {
3859         trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba);
3860         return NVME_INVALID_FIELD | NVME_DNR;
3861     }
3862 
3863     switch (action) {
3864 
3865     case NVME_ZONE_ACTION_OPEN:
3866         if (all) {
3867             proc_mask = NVME_PROC_CLOSED_ZONES;
3868         }
3869         trace_pci_nvme_open_zone(slba, zone_idx, all);
3870         status = nvme_do_zone_op(ns, zone, proc_mask, nvme_open_zone, req);
3871         break;
3872 
3873     case NVME_ZONE_ACTION_CLOSE:
3874         if (all) {
3875             proc_mask = NVME_PROC_OPENED_ZONES;
3876         }
3877         trace_pci_nvme_close_zone(slba, zone_idx, all);
3878         status = nvme_do_zone_op(ns, zone, proc_mask, nvme_close_zone, req);
3879         break;
3880 
3881     case NVME_ZONE_ACTION_FINISH:
3882         if (all) {
3883             proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES;
3884         }
3885         trace_pci_nvme_finish_zone(slba, zone_idx, all);
3886         status = nvme_do_zone_op(ns, zone, proc_mask, nvme_finish_zone, req);
3887         break;
3888 
3889     case NVME_ZONE_ACTION_RESET:
3890         trace_pci_nvme_reset_zone(slba, zone_idx, all);
3891 
3892         iocb = blk_aio_get(&nvme_zone_reset_aiocb_info, ns->blkconf.blk,
3893                            nvme_misc_cb, req);
3894 
3895         iocb->req = req;
3896         iocb->ret = 0;
3897         iocb->all = all;
3898         iocb->idx = zone_idx;
3899         iocb->zone = NULL;
3900 
3901         req->aiocb = &iocb->common;
3902         nvme_zone_reset_cb(iocb, 0);
3903 
3904         return NVME_NO_COMPLETE;
3905 
3906     case NVME_ZONE_ACTION_OFFLINE:
3907         if (all) {
3908             proc_mask = NVME_PROC_READ_ONLY_ZONES;
3909         }
3910         trace_pci_nvme_offline_zone(slba, zone_idx, all);
3911         status = nvme_do_zone_op(ns, zone, proc_mask, nvme_offline_zone, req);
3912         break;
3913 
3914     case NVME_ZONE_ACTION_SET_ZD_EXT:
3915         trace_pci_nvme_set_descriptor_extension(slba, zone_idx);
3916         if (all || !ns->params.zd_extension_size) {
3917             return NVME_INVALID_FIELD | NVME_DNR;
3918         }
3919         zd_ext = nvme_get_zd_extension(ns, zone_idx);
3920         status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req);
3921         if (status) {
3922             trace_pci_nvme_err_zd_extension_map_error(zone_idx);
3923             return status;
3924         }
3925 
3926         status = nvme_set_zd_ext(ns, zone);
3927         if (status == NVME_SUCCESS) {
3928             trace_pci_nvme_zd_extension_set(zone_idx);
3929             return status;
3930         }
3931         break;
3932 
3933     case NVME_ZONE_ACTION_ZRWA_FLUSH:
3934         if (all) {
3935             return NVME_INVALID_FIELD | NVME_DNR;
3936         }
3937 
3938         return nvme_zone_mgmt_send_zrwa_flush(n, zone, slba, req);
3939 
3940     default:
3941         trace_pci_nvme_err_invalid_mgmt_action(action);
3942         status = NVME_INVALID_FIELD;
3943     }
3944 
3945     if (status == NVME_ZONE_INVAL_TRANSITION) {
3946         trace_pci_nvme_err_invalid_zone_state_transition(action, slba,
3947                                                          zone->d.za);
3948     }
3949     if (status) {
3950         status |= NVME_DNR;
3951     }
3952 
3953     return status;
3954 }
3955 
3956 static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl)
3957 {
3958     NvmeZoneState zs = nvme_get_zone_state(zl);
3959 
3960     switch (zafs) {
3961     case NVME_ZONE_REPORT_ALL:
3962         return true;
3963     case NVME_ZONE_REPORT_EMPTY:
3964         return zs == NVME_ZONE_STATE_EMPTY;
3965     case NVME_ZONE_REPORT_IMPLICITLY_OPEN:
3966         return zs == NVME_ZONE_STATE_IMPLICITLY_OPEN;
3967     case NVME_ZONE_REPORT_EXPLICITLY_OPEN:
3968         return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN;
3969     case NVME_ZONE_REPORT_CLOSED:
3970         return zs == NVME_ZONE_STATE_CLOSED;
3971     case NVME_ZONE_REPORT_FULL:
3972         return zs == NVME_ZONE_STATE_FULL;
3973     case NVME_ZONE_REPORT_READ_ONLY:
3974         return zs == NVME_ZONE_STATE_READ_ONLY;
3975     case NVME_ZONE_REPORT_OFFLINE:
3976         return zs == NVME_ZONE_STATE_OFFLINE;
3977     default:
3978         return false;
3979     }
3980 }
3981 
3982 static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
3983 {
3984     NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
3985     NvmeNamespace *ns = req->ns;
3986     /* cdw12 is zero-based number of dwords to return. Convert to bytes */
3987     uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2;
3988     uint32_t dw13 = le32_to_cpu(cmd->cdw13);
3989     uint32_t zone_idx, zra, zrasf, partial;
3990     uint64_t max_zones, nr_zones = 0;
3991     uint16_t status;
3992     uint64_t slba;
3993     NvmeZoneDescr *z;
3994     NvmeZone *zone;
3995     NvmeZoneReportHeader *header;
3996     void *buf, *buf_p;
3997     size_t zone_entry_sz;
3998     int i;
3999 
4000     req->status = NVME_SUCCESS;
4001 
4002     status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx);
4003     if (status) {
4004         return status;
4005     }
4006 
4007     zra = dw13 & 0xff;
4008     if (zra != NVME_ZONE_REPORT && zra != NVME_ZONE_REPORT_EXTENDED) {
4009         return NVME_INVALID_FIELD | NVME_DNR;
4010     }
4011     if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) {
4012         return NVME_INVALID_FIELD | NVME_DNR;
4013     }
4014 
4015     zrasf = (dw13 >> 8) & 0xff;
4016     if (zrasf > NVME_ZONE_REPORT_OFFLINE) {
4017         return NVME_INVALID_FIELD | NVME_DNR;
4018     }
4019 
4020     if (data_size < sizeof(NvmeZoneReportHeader)) {
4021         return NVME_INVALID_FIELD | NVME_DNR;
4022     }
4023 
4024     status = nvme_check_mdts(n, data_size);
4025     if (status) {
4026         return status;
4027     }
4028 
4029     partial = (dw13 >> 16) & 0x01;
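         /*
          * The Partial Report bit (dw13 bit 16) only affects the zone count in
          * the report header: when set, nr_zones counts just the matching
          * zones that fit in the buffer; when clear, it counts all matching
          * zones from the given SLBA onwards, even though only as many
          * descriptors as fit are returned.
          */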
4030 
4031     zone_entry_sz = sizeof(NvmeZoneDescr);
4032     if (zra == NVME_ZONE_REPORT_EXTENDED) {
4033         zone_entry_sz += ns->params.zd_extension_size;
4034     }
4035 
4036     max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz;
4037     buf = g_malloc0(data_size);
4038 
4039     zone = &ns->zone_array[zone_idx];
4040     for (i = zone_idx; i < ns->num_zones; i++) {
4041         if (partial && nr_zones >= max_zones) {
4042             break;
4043         }
4044         if (nvme_zone_matches_filter(zrasf, zone++)) {
4045             nr_zones++;
4046         }
4047     }
4048     header = buf;
4049     header->nr_zones = cpu_to_le64(nr_zones);
4050 
4051     buf_p = buf + sizeof(NvmeZoneReportHeader);
4052     for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) {
4053         zone = &ns->zone_array[zone_idx];
4054         if (nvme_zone_matches_filter(zrasf, zone)) {
4055             z = buf_p;
4056             buf_p += sizeof(NvmeZoneDescr);
4057 
4058             z->zt = zone->d.zt;
4059             z->zs = zone->d.zs;
4060             z->zcap = cpu_to_le64(zone->d.zcap);
4061             z->zslba = cpu_to_le64(zone->d.zslba);
4062             z->za = zone->d.za;
4063 
4064             if (nvme_wp_is_valid(zone)) {
4065                 z->wp = cpu_to_le64(zone->d.wp);
4066             } else {
4067                 z->wp = cpu_to_le64(~0ULL);
4068             }
4069 
4070             if (zra == NVME_ZONE_REPORT_EXTENDED) {
4071                 if (zone->d.za & NVME_ZA_ZD_EXT_VALID) {
4072                     memcpy(buf_p, nvme_get_zd_extension(ns, zone_idx),
4073                            ns->params.zd_extension_size);
4074                 }
4075                 buf_p += ns->params.zd_extension_size;
4076             }
4077 
4078             max_zones--;
4079         }
4080     }
4081 
4082     status = nvme_c2h(n, (uint8_t *)buf, data_size, req);
4083 
4084     g_free(buf);
4085 
4086     return status;
4087 }
4088 
4089 static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
4090 {
4091     NvmeNamespace *ns;
4092     uint32_t nsid = le32_to_cpu(req->cmd.nsid);
4093 
4094     trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
4095                           req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));
4096 
4097     if (!nvme_nsid_valid(n, nsid)) {
4098         return NVME_INVALID_NSID | NVME_DNR;
4099     }
4100 
4101     /*
4102      * In the base NVM command set, Flush may apply to all namespaces
4103      * (indicated by NSID being set to FFFFFFFFh). But if that feature is used
4104      * along with TP 4056 (Namespace Types), the semantics become ambiguous.
4105      *
4106      * If NSID is indeed set to FFFFFFFFh, we simply cannot associate the
4107      * opcode with a specific command, since a unique I/O command set cannot
4108      * be determined. Opcode 0h might have completely different semantics in
4109      * some other command set - does an NSID of FFFFFFFFh then mean "for all
4110      * namespaces, apply whatever command set specific command uses the 0h
4111      * opcode"? Or does it mean "for all namespaces, apply whatever command
4112      * uses the 0h opcode if, and only if, it allows NSID to be FFFFFFFFh"?
4113      *
4114      * Anyway (and luckily), for now, we do not care about this since the
4115      * device only supports namespace types that include the NVM Flush
4116      * command (NVM and Zoned), so always do an NVM Flush.
4119      */
4120     if (req->cmd.opcode == NVME_CMD_FLUSH) {
4121         return nvme_flush(n, req);
4122     }
4123 
4124     ns = nvme_ns(n, nsid);
4125     if (unlikely(!ns)) {
4126         return NVME_INVALID_FIELD | NVME_DNR;
4127     }
4128 
4129     if (!(ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
4130         trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
4131         return NVME_INVALID_OPCODE | NVME_DNR;
4132     }
4133 
4134     if (ns->status) {
4135         return ns->status;
4136     }
4137 
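         /* Fused operations are not supported; reject commands with FUSE flags set. */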
4138     if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) {
4139         return NVME_INVALID_FIELD;
4140     }
4141 
4142     req->ns = ns;
4143 
4144     switch (req->cmd.opcode) {
4145     case NVME_CMD_WRITE_ZEROES:
4146         return nvme_write_zeroes(n, req);
4147     case NVME_CMD_ZONE_APPEND:
4148         return nvme_zone_append(n, req);
4149     case NVME_CMD_WRITE:
4150         return nvme_write(n, req);
4151     case NVME_CMD_READ:
4152         return nvme_read(n, req);
4153     case NVME_CMD_COMPARE:
4154         return nvme_compare(n, req);
4155     case NVME_CMD_DSM:
4156         return nvme_dsm(n, req);
4157     case NVME_CMD_VERIFY:
4158         return nvme_verify(n, req);
4159     case NVME_CMD_COPY:
4160         return nvme_copy(n, req);
4161     case NVME_CMD_ZONE_MGMT_SEND:
4162         return nvme_zone_mgmt_send(n, req);
4163     case NVME_CMD_ZONE_MGMT_RECV:
4164         return nvme_zone_mgmt_recv(n, req);
4165     default:
4166         assert(false);
4167     }
4168 
4169     return NVME_INVALID_OPCODE | NVME_DNR;
4170 }
4171 
4172 static void nvme_cq_notifier(EventNotifier *e)
4173 {
4174     NvmeCQueue *cq = container_of(e, NvmeCQueue, notifier);
4175     NvmeCtrl *n = cq->ctrl;
4176 
4177     if (!event_notifier_test_and_clear(e)) {
4178         return;
4179     }
4180 
4181     nvme_update_cq_head(cq);
4182 
4183     if (cq->tail == cq->head) {
4184         if (cq->irq_enabled) {
4185             n->cq_pending--;
4186         }
4187 
4188         nvme_irq_deassert(n, cq);
4189     }
4190 
4191     qemu_bh_schedule(cq->bh);
4192 }
4193 
4194 static int nvme_init_cq_ioeventfd(NvmeCQueue *cq)
4195 {
4196     NvmeCtrl *n = cq->ctrl;
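         /*
          * Doorbell registers start at offset 1000h and the controller uses a
          * doorbell stride of 0, i.e. 4-byte doorbells. Each queue pair thus
          * occupies 8 bytes: the SQ tail doorbell at 1000h + (qid << 3) and
          * the CQ head doorbell 4 bytes after it (e.g. cqid=3 maps to 101ch).
          */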
4197     uint16_t offset = (cq->cqid << 3) + (1 << 2);
4198     int ret;
4199 
4200     ret = event_notifier_init(&cq->notifier, 0);
4201     if (ret < 0) {
4202         return ret;
4203     }
4204 
4205     event_notifier_set_handler(&cq->notifier, nvme_cq_notifier);
4206     memory_region_add_eventfd(&n->iomem,
4207                               0x1000 + offset, 4, false, 0, &cq->notifier);
4208 
4209     return 0;
4210 }
4211 
4212 static void nvme_sq_notifier(EventNotifier *e)
4213 {
4214     NvmeSQueue *sq = container_of(e, NvmeSQueue, notifier);
4215 
4216     if (!event_notifier_test_and_clear(e)) {
4217         return;
4218     }
4219 
4220     nvme_process_sq(sq);
4221 }
4222 
4223 static int nvme_init_sq_ioeventfd(NvmeSQueue *sq)
4224 {
4225     NvmeCtrl *n = sq->ctrl;
4226     uint16_t offset = sq->sqid << 3;
4227     int ret;
4228 
4229     ret = event_notifier_init(&sq->notifier, 0);
4230     if (ret < 0) {
4231         return ret;
4232     }
4233 
4234     event_notifier_set_handler(&sq->notifier, nvme_sq_notifier);
4235     memory_region_add_eventfd(&n->iomem,
4236                               0x1000 + offset, 4, false, 0, &sq->notifier);
4237 
4238     return 0;
4239 }
4240 
4241 static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
4242 {
4243     uint16_t offset = sq->sqid << 3;
4244 
4245     n->sq[sq->sqid] = NULL;
4246     qemu_bh_delete(sq->bh);
4247     if (sq->ioeventfd_enabled) {
4248         memory_region_del_eventfd(&n->iomem,
4249                                   0x1000 + offset, 4, false, 0, &sq->notifier);
4250         event_notifier_set_handler(&sq->notifier, NULL);
4251         event_notifier_cleanup(&sq->notifier);
4252     }
4253     g_free(sq->io_req);
4254     if (sq->sqid) {
4255         g_free(sq);
4256     }
4257 }
4258 
4259 static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
4260 {
4261     NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
4262     NvmeRequest *r, *next;
4263     NvmeSQueue *sq;
4264     NvmeCQueue *cq;
4265     uint16_t qid = le16_to_cpu(c->qid);
4266 
4267     if (unlikely(!qid || nvme_check_sqid(n, qid))) {
4268         trace_pci_nvme_err_invalid_del_sq(qid);
4269         return NVME_INVALID_QID | NVME_DNR;
4270     }
4271 
4272     trace_pci_nvme_del_sq(qid);
4273 
4274     sq = n->sq[qid];
4275     while (!QTAILQ_EMPTY(&sq->out_req_list)) {
4276         r = QTAILQ_FIRST(&sq->out_req_list);
4277         assert(r->aiocb);
4278         blk_aio_cancel(r->aiocb);
4279     }
4280 
4281     assert(QTAILQ_EMPTY(&sq->out_req_list));
4282 
4283     if (!nvme_check_cqid(n, sq->cqid)) {
4284         cq = n->cq[sq->cqid];
4285         QTAILQ_REMOVE(&cq->sq_list, sq, entry);
4286 
4287         nvme_post_cqes(cq);
4288         QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
4289             if (r->sq == sq) {
4290                 QTAILQ_REMOVE(&cq->req_list, r, entry);
4291                 QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
4292             }
4293         }
4294     }
4295 
4296     nvme_free_sq(sq, n);
4297     return NVME_SUCCESS;
4298 }
4299 
4300 static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
4301                          uint16_t sqid, uint16_t cqid, uint16_t size)
4302 {
4303     int i;
4304     NvmeCQueue *cq;
4305 
4306     sq->ctrl = n;
4307     sq->dma_addr = dma_addr;
4308     sq->sqid = sqid;
4309     sq->size = size;
4310     sq->cqid = cqid;
4311     sq->head = sq->tail = 0;
4312     sq->io_req = g_new0(NvmeRequest, sq->size);
4313 
4314     QTAILQ_INIT(&sq->req_list);
4315     QTAILQ_INIT(&sq->out_req_list);
4316     for (i = 0; i < sq->size; i++) {
4317         sq->io_req[i].sq = sq;
4318         QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
4319     }
4320 
4321     sq->bh = qemu_bh_new(nvme_process_sq, sq);
4322 
4323     if (n->dbbuf_enabled) {
4324         sq->db_addr = n->dbbuf_dbs + (sqid << 3);
4325         sq->ei_addr = n->dbbuf_eis + (sqid << 3);
4326 
4327         if (n->params.ioeventfd && sq->sqid != 0) {
4328             if (!nvme_init_sq_ioeventfd(sq)) {
4329                 sq->ioeventfd_enabled = true;
4330             }
4331         }
4332     }
4333 
4334     assert(n->cq[cqid]);
4335     cq = n->cq[cqid];
4336     QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
4337     n->sq[sqid] = sq;
4338 }
4339 
4340 static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
4341 {
4342     NvmeSQueue *sq;
4343     NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;
4344 
4345     uint16_t cqid = le16_to_cpu(c->cqid);
4346     uint16_t sqid = le16_to_cpu(c->sqid);
4347     uint16_t qsize = le16_to_cpu(c->qsize);
4348     uint16_t qflags = le16_to_cpu(c->sq_flags);
4349     uint64_t prp1 = le64_to_cpu(c->prp1);
4350 
4351     trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);
4352 
4353     if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
4354         trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
4355         return NVME_INVALID_CQID | NVME_DNR;
4356     }
4357     if (unlikely(!sqid || sqid > n->conf_ioqpairs || n->sq[sqid] != NULL)) {
4358         trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
4359         return NVME_INVALID_QID | NVME_DNR;
4360     }
4361     if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) {
4362         trace_pci_nvme_err_invalid_create_sq_size(qsize);
4363         return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
4364     }
4365     if (unlikely(prp1 & (n->page_size - 1))) {
4366         trace_pci_nvme_err_invalid_create_sq_addr(prp1);
4367         return NVME_INVALID_PRP_OFFSET | NVME_DNR;
4368     }
4369     if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
4370         trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
4371         return NVME_INVALID_FIELD | NVME_DNR;
4372     }
4373     sq = g_malloc0(sizeof(*sq));
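         /* CDW10.QSIZE is a 0's based value; e.g. qsize=0xff means 256 entries. */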
4374     nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
4375     return NVME_SUCCESS;
4376 }
4377 
4378 struct nvme_stats {
4379     uint64_t units_read;
4380     uint64_t units_written;
4381     uint64_t read_commands;
4382     uint64_t write_commands;
4383 };
4384 
4385 static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
4386 {
4387     BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);
4388 
4389     stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
4390     stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
4391     stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
4392     stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
4393 }
4394 
4395 static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
4396                                 uint64_t off, NvmeRequest *req)
4397 {
4398     uint32_t nsid = le32_to_cpu(req->cmd.nsid);
4399     struct nvme_stats stats = { 0 };
4400     NvmeSmartLog smart = { 0 };
4401     uint32_t trans_len;
4402     NvmeNamespace *ns;
4403     time_t current_ms;
4404 
4405     if (off >= sizeof(smart)) {
4406         return NVME_INVALID_FIELD | NVME_DNR;
4407     }
4408 
4409     if (nsid != 0xffffffff) {
4410         ns = nvme_ns(n, nsid);
4411         if (!ns) {
4412             return NVME_INVALID_NSID | NVME_DNR;
4413         }
4414         nvme_set_blk_stats(ns, &stats);
4415     } else {
4416         int i;
4417 
4418         for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
4419             ns = nvme_ns(n, i);
4420             if (!ns) {
4421                 continue;
4422             }
4423             nvme_set_blk_stats(ns, &stats);
4424         }
4425     }
4426 
4427     trans_len = MIN(sizeof(smart) - off, buf_len);
4428     smart.critical_warning = n->smart_critical_warning;
4429 
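         /*
          * Data Units Read/Written are reported in units of 1000 512-byte
          * blocks, rounded up; e.g. 1 GiB written is 2097152 sectors and is
          * reported as 2098 units.
          */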
4430     smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read,
4431                                                         1000));
4432     smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written,
4433                                                            1000));
4434     smart.host_read_commands[0] = cpu_to_le64(stats.read_commands);
4435     smart.host_write_commands[0] = cpu_to_le64(stats.write_commands);
4436 
4437     smart.temperature = cpu_to_le16(n->temperature);
4438 
4439     if ((n->temperature >= n->features.temp_thresh_hi) ||
4440         (n->temperature <= n->features.temp_thresh_low)) {
4441         smart.critical_warning |= NVME_SMART_TEMPERATURE;
4442     }
4443 
4444     current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
4445     smart.power_on_hours[0] =
4446         cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);
4447 
4448     if (!rae) {
4449         nvme_clear_events(n, NVME_AER_TYPE_SMART);
4450     }
4451 
4452     return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);
4453 }
4454 
4455 static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
4456                                  NvmeRequest *req)
4457 {
4458     uint32_t trans_len;
4459     NvmeFwSlotInfoLog fw_log = {
4460         .afi = 0x1,
4461     };
4462 
4463     if (off >= sizeof(fw_log)) {
4464         return NVME_INVALID_FIELD | NVME_DNR;
4465     }
4466 
4467     strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
4468     trans_len = MIN(sizeof(fw_log) - off, buf_len);
4469 
4470     return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req);
4471 }
4472 
4473 static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
4474                                 uint64_t off, NvmeRequest *req)
4475 {
4476     uint32_t trans_len;
4477     NvmeErrorLog errlog;
4478 
4479     if (off >= sizeof(errlog)) {
4480         return NVME_INVALID_FIELD | NVME_DNR;
4481     }
4482 
4483     if (!rae) {
4484         nvme_clear_events(n, NVME_AER_TYPE_ERROR);
4485     }
4486 
4487     memset(&errlog, 0x0, sizeof(errlog));
4488     trans_len = MIN(sizeof(errlog) - off, buf_len);
4489 
4490     return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req);
4491 }
4492 
4493 static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
4494                                     uint64_t off, NvmeRequest *req)
4495 {
4496     uint32_t nslist[1024];
4497     uint32_t trans_len;
4498     int i = 0;
4499     uint32_t nsid;
4500 
4501     if (off >= sizeof(nslist)) {
4502         trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(nslist));
4503         return NVME_INVALID_FIELD | NVME_DNR;
4504     }
4505 
4506     memset(nslist, 0x0, sizeof(nslist));
4507     trans_len = MIN(sizeof(nslist) - off, buf_len);
4508 
4509     while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) !=
4510             NVME_CHANGED_NSID_SIZE) {
4511         /*
4512          * If more than 1024 namespaces have changed, the first entry in the log
4513          * page is set to FFFFFFFFh and the remaining entries to 0, per the spec.
4514          */
4515         if (i == ARRAY_SIZE(nslist)) {
4516             memset(nslist, 0x0, sizeof(nslist));
4517             nslist[0] = 0xffffffff;
4518             break;
4519         }
4520 
4521         nslist[i++] = nsid;
4522         clear_bit(nsid, n->changed_nsids);
4523     }
4524 
4525     /*
4526      * Clear any remaining changed-namespace bits when the loop above exited
4527      * early because more than 1024 namespaces had changed.
4528      */
4529     if (nslist[0] == 0xffffffff) {
4530         bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE);
4531     }
4532 
4533     if (!rae) {
4534         nvme_clear_events(n, NVME_AER_TYPE_NOTICE);
4535     }
4536 
4537     return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req);
4538 }
4539 
4540 static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
4541                                  uint64_t off, NvmeRequest *req)
4542 {
4543     NvmeEffectsLog log = {};
4544     const uint32_t *src_iocs = NULL;
4545     uint32_t trans_len;
4546 
4547     if (off >= sizeof(log)) {
4548         trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log));
4549         return NVME_INVALID_FIELD | NVME_DNR;
4550     }
4551 
4552     switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) {
4553     case NVME_CC_CSS_NVM:
4554         src_iocs = nvme_cse_iocs_nvm;
4555         /* fall through */
4556     case NVME_CC_CSS_ADMIN_ONLY:
4557         break;
4558     case NVME_CC_CSS_CSI:
4559         switch (csi) {
4560         case NVME_CSI_NVM:
4561             src_iocs = nvme_cse_iocs_nvm;
4562             break;
4563         case NVME_CSI_ZONED:
4564             src_iocs = nvme_cse_iocs_zoned;
4565             break;
4566         }
4567     }
4568 
4569     memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs));
4570 
4571     if (src_iocs) {
4572         memcpy(log.iocs, src_iocs, sizeof(log.iocs));
4573     }
4574 
4575     trans_len = MIN(sizeof(log) - off, buf_len);
4576 
4577     return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req);
4578 }
4579 
4580 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
4581 {
4582     NvmeCmd *cmd = &req->cmd;
4583 
4584     uint32_t dw10 = le32_to_cpu(cmd->cdw10);
4585     uint32_t dw11 = le32_to_cpu(cmd->cdw11);
4586     uint32_t dw12 = le32_to_cpu(cmd->cdw12);
4587     uint32_t dw13 = le32_to_cpu(cmd->cdw13);
4588     uint8_t  lid = dw10 & 0xff;
4589     uint8_t  lsp = (dw10 >> 8) & 0xf;
4590     uint8_t  rae = (dw10 >> 15) & 0x1;
4591     uint8_t  csi = le32_to_cpu(cmd->cdw14) >> 24;
4592     uint32_t numdl, numdu;
4593     uint64_t off, lpol, lpou;
4594     size_t   len;
4595     uint16_t status;
4596 
4597     numdl = (dw10 >> 16);
4598     numdu = (dw11 & 0xffff);
4599     lpol = dw12;
4600     lpou = dw13;
4601 
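         /*
          * NUMDU/NUMDL form a 0's based dword count and LPOU/LPOL a 64-bit byte
          * offset into the log page; e.g. numdl=0x3ff with numdu=0 requests
          * 1024 dwords, i.e. a 4096 byte transfer. The offset must be dword
          * aligned.
          */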
4602     len = (((numdu << 16) | numdl) + 1) << 2;
4603     off = (lpou << 32ULL) | lpol;
4604 
4605     if (off & 0x3) {
4606         return NVME_INVALID_FIELD | NVME_DNR;
4607     }
4608 
4609     trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);
4610 
4611     status = nvme_check_mdts(n, len);
4612     if (status) {
4613         return status;
4614     }
4615 
4616     switch (lid) {
4617     case NVME_LOG_ERROR_INFO:
4618         return nvme_error_info(n, rae, len, off, req);
4619     case NVME_LOG_SMART_INFO:
4620         return nvme_smart_info(n, rae, len, off, req);
4621     case NVME_LOG_FW_SLOT_INFO:
4622         return nvme_fw_log_info(n, len, off, req);
4623     case NVME_LOG_CHANGED_NSLIST:
4624         return nvme_changed_nslist(n, rae, len, off, req);
4625     case NVME_LOG_CMD_EFFECTS:
4626         return nvme_cmd_effects(n, csi, len, off, req);
4627     default:
4628         trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
4629         return NVME_INVALID_FIELD | NVME_DNR;
4630     }
4631 }
4632 
4633 static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
4634 {
4635     PCIDevice *pci = PCI_DEVICE(n);
4636     uint16_t offset = (cq->cqid << 3) + (1 << 2);
4637 
4638     n->cq[cq->cqid] = NULL;
4639     qemu_bh_delete(cq->bh);
4640     if (cq->ioeventfd_enabled) {
4641         memory_region_del_eventfd(&n->iomem,
4642                                   0x1000 + offset, 4, false, 0, &cq->notifier);
4643         event_notifier_set_handler(&cq->notifier, NULL);
4644         event_notifier_cleanup(&cq->notifier);
4645     }
4646     if (msix_enabled(pci)) {
4647         msix_vector_unuse(pci, cq->vector);
4648     }
4649     if (cq->cqid) {
4650         g_free(cq);
4651     }
4652 }
4653 
4654 static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
4655 {
4656     NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
4657     NvmeCQueue *cq;
4658     uint16_t qid = le16_to_cpu(c->qid);
4659 
4660     if (unlikely(!qid || nvme_check_cqid(n, qid))) {
4661         trace_pci_nvme_err_invalid_del_cq_cqid(qid);
4662         return NVME_INVALID_CQID | NVME_DNR;
4663     }
4664 
4665     cq = n->cq[qid];
4666     if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
4667         trace_pci_nvme_err_invalid_del_cq_notempty(qid);
4668         return NVME_INVALID_QUEUE_DEL;
4669     }
4670 
4671     if (cq->irq_enabled && cq->tail != cq->head) {
4672         n->cq_pending--;
4673     }
4674 
4675     nvme_irq_deassert(n, cq);
4676     trace_pci_nvme_del_cq(qid);
4677     nvme_free_cq(cq, n);
4678     return NVME_SUCCESS;
4679 }
4680 
4681 static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
4682                          uint16_t cqid, uint16_t vector, uint16_t size,
4683                          uint16_t irq_enabled)
4684 {
4685     PCIDevice *pci = PCI_DEVICE(n);
4686 
4687     if (msix_enabled(pci)) {
4688         msix_vector_use(pci, vector);
4689     }
4690     cq->ctrl = n;
4691     cq->cqid = cqid;
4692     cq->size = size;
4693     cq->dma_addr = dma_addr;
4694     cq->phase = 1;
4695     cq->irq_enabled = irq_enabled;
4696     cq->vector = vector;
4697     cq->head = cq->tail = 0;
4698     QTAILQ_INIT(&cq->req_list);
4699     QTAILQ_INIT(&cq->sq_list);
4700     if (n->dbbuf_enabled) {
4701         cq->db_addr = n->dbbuf_dbs + (cqid << 3) + (1 << 2);
4702         cq->ei_addr = n->dbbuf_eis + (cqid << 3) + (1 << 2);
4703 
4704         if (n->params.ioeventfd && cqid != 0) {
4705             if (!nvme_init_cq_ioeventfd(cq)) {
4706                 cq->ioeventfd_enabled = true;
4707             }
4708         }
4709     }
4710     n->cq[cqid] = cq;
4711     cq->bh = qemu_bh_new(nvme_post_cqes, cq);
4712 }
4713 
4714 static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
4715 {
4716     NvmeCQueue *cq;
4717     NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
4718     uint16_t cqid = le16_to_cpu(c->cqid);
4719     uint16_t vector = le16_to_cpu(c->irq_vector);
4720     uint16_t qsize = le16_to_cpu(c->qsize);
4721     uint16_t qflags = le16_to_cpu(c->cq_flags);
4722     uint64_t prp1 = le64_to_cpu(c->prp1);
4723 
4724     trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
4725                              NVME_CQ_FLAGS_IEN(qflags) != 0);
4726 
4727     if (unlikely(!cqid || cqid > n->conf_ioqpairs || n->cq[cqid] != NULL)) {
4728         trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
4729         return NVME_INVALID_QID | NVME_DNR;
4730     }
4731     if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) {
4732         trace_pci_nvme_err_invalid_create_cq_size(qsize);
4733         return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
4734     }
4735     if (unlikely(prp1 & (n->page_size - 1))) {
4736         trace_pci_nvme_err_invalid_create_cq_addr(prp1);
4737         return NVME_INVALID_PRP_OFFSET | NVME_DNR;
4738     }
4739     if (unlikely(!msix_enabled(PCI_DEVICE(n)) && vector)) {
4740         trace_pci_nvme_err_invalid_create_cq_vector(vector);
4741         return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
4742     }
4743     if (unlikely(vector >= n->conf_msix_qsize)) {
4744         trace_pci_nvme_err_invalid_create_cq_vector(vector);
4745         return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
4746     }
4747     if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
4748         trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
4749         return NVME_INVALID_FIELD | NVME_DNR;
4750     }
4751 
4752     cq = g_malloc0(sizeof(*cq));
4753     nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
4754                  NVME_CQ_FLAGS_IEN(qflags));
4755 
4756     /*
4757      * It is only required to set qs_created when creating a completion queue;
4758      * creating a submission queue without a matching completion queue will
4759      * fail.
4760      */
4761     n->qs_created = true;
4762     return NVME_SUCCESS;
4763 }
4764 
4765 static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
4766 {
4767     uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
4768 
4769     return nvme_c2h(n, id, sizeof(id), req);
4770 }
4771 
4772 static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
4773 {
4774     trace_pci_nvme_identify_ctrl();
4775 
4776     return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req);
4777 }
4778 
4779 static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
4780 {
4781     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4782     uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
4783     NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id;
4784 
4785     trace_pci_nvme_identify_ctrl_csi(c->csi);
4786 
4787     switch (c->csi) {
4788     case NVME_CSI_NVM:
4789         id_nvm->vsl = n->params.vsl;
4790         id_nvm->dmrsl = cpu_to_le32(n->dmrsl);
4791         break;
4792 
4793     case NVME_CSI_ZONED:
4794         ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl;
4795         break;
4796 
4797     default:
4798         return NVME_INVALID_FIELD | NVME_DNR;
4799     }
4800 
4801     return nvme_c2h(n, id, sizeof(id), req);
4802 }
4803 
4804 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
4805 {
4806     NvmeNamespace *ns;
4807     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4808     uint32_t nsid = le32_to_cpu(c->nsid);
4809 
4810     trace_pci_nvme_identify_ns(nsid);
4811 
4812     if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
4813         return NVME_INVALID_NSID | NVME_DNR;
4814     }
4815 
4816     ns = nvme_ns(n, nsid);
4817     if (unlikely(!ns)) {
4818         if (!active) {
4819             ns = nvme_subsys_ns(n->subsys, nsid);
4820             if (!ns) {
4821                 return nvme_rpt_empty_id_struct(n, req);
4822             }
4823         } else {
4824             return nvme_rpt_empty_id_struct(n, req);
4825         }
4826     }
4827 
4828     if (active || ns->csi == NVME_CSI_NVM) {
4829         return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req);
4830     }
4831 
4832     return NVME_INVALID_CMD_SET | NVME_DNR;
4833 }
4834 
4835 static uint16_t nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req,
4836                                         bool attached)
4837 {
4838     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4839     uint32_t nsid = le32_to_cpu(c->nsid);
4840     uint16_t min_id = le16_to_cpu(c->ctrlid);
4841     uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
4842     uint16_t *ids = &list[1];
4843     NvmeNamespace *ns;
4844     NvmeCtrl *ctrl;
4845     int cntlid, nr_ids = 0;
4846 
4847     trace_pci_nvme_identify_ctrl_list(c->cns, min_id);
4848 
4849     if (!n->subsys) {
4850         return NVME_INVALID_FIELD | NVME_DNR;
4851     }
4852 
4853     if (attached) {
4854         if (nsid == NVME_NSID_BROADCAST) {
4855             return NVME_INVALID_FIELD | NVME_DNR;
4856         }
4857 
4858         ns = nvme_subsys_ns(n->subsys, nsid);
4859         if (!ns) {
4860             return NVME_INVALID_FIELD | NVME_DNR;
4861         }
4862     }
4863 
4864     for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) {
4865         ctrl = nvme_subsys_ctrl(n->subsys, cntlid);
4866         if (!ctrl) {
4867             continue;
4868         }
4869 
4870         if (attached && !nvme_ns(ctrl, nsid)) {
4871             continue;
4872         }
4873 
4874         ids[nr_ids++] = cntlid;
4875     }
4876 
4877     list[0] = nr_ids;
4878 
4879     return nvme_c2h(n, (uint8_t *)list, sizeof(list), req);
4880 }
4881 
4882 static uint16_t nvme_identify_pri_ctrl_cap(NvmeCtrl *n, NvmeRequest *req)
4883 {
4884     trace_pci_nvme_identify_pri_ctrl_cap(le16_to_cpu(n->pri_ctrl_cap.cntlid));
4885 
4886     return nvme_c2h(n, (uint8_t *)&n->pri_ctrl_cap,
4887                     sizeof(NvmePriCtrlCap), req);
4888 }
4889 
4890 static uint16_t nvme_identify_sec_ctrl_list(NvmeCtrl *n, NvmeRequest *req)
4891 {
4892     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4893     uint16_t pri_ctrl_id = le16_to_cpu(n->pri_ctrl_cap.cntlid);
4894     uint16_t min_id = le16_to_cpu(c->ctrlid);
4895     uint8_t num_sec_ctrl = n->sec_ctrl_list.numcntl;
4896     NvmeSecCtrlList list = {0};
4897     uint8_t i;
4898 
4899     for (i = 0; i < num_sec_ctrl; i++) {
4900         if (n->sec_ctrl_list.sec[i].scid >= min_id) {
4901             list.numcntl = num_sec_ctrl - i;
4902             memcpy(&list.sec, n->sec_ctrl_list.sec + i,
4903                    list.numcntl * sizeof(NvmeSecCtrlEntry));
4904             break;
4905         }
4906     }
4907 
4908     trace_pci_nvme_identify_sec_ctrl_list(pri_ctrl_id, list.numcntl);
4909 
4910     return nvme_c2h(n, (uint8_t *)&list, sizeof(list), req);
4911 }
4912 
4913 static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
4914                                      bool active)
4915 {
4916     NvmeNamespace *ns;
4917     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4918     uint32_t nsid = le32_to_cpu(c->nsid);
4919 
4920     trace_pci_nvme_identify_ns_csi(nsid, c->csi);
4921 
4922     if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
4923         return NVME_INVALID_NSID | NVME_DNR;
4924     }
4925 
4926     ns = nvme_ns(n, nsid);
4927     if (unlikely(!ns)) {
4928         if (!active) {
4929             ns = nvme_subsys_ns(n->subsys, nsid);
4930             if (!ns) {
4931                 return nvme_rpt_empty_id_struct(n, req);
4932             }
4933         } else {
4934             return nvme_rpt_empty_id_struct(n, req);
4935         }
4936     }
4937 
4938     if (c->csi == NVME_CSI_NVM) {
4939         return nvme_c2h(n, (uint8_t *)&ns->id_ns_nvm, sizeof(NvmeIdNsNvm),
4940                         req);
4941     } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
4942         return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
4943                         req);
4944     }
4945 
4946     return NVME_INVALID_FIELD | NVME_DNR;
4947 }
4948 
4949 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
4950                                      bool active)
4951 {
4952     NvmeNamespace *ns;
4953     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4954     uint32_t min_nsid = le32_to_cpu(c->nsid);
4955     uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
4956     static const int data_len = sizeof(list);
4957     uint32_t *list_ptr = (uint32_t *)list;
4958     int i, j = 0;
4959 
4960     trace_pci_nvme_identify_nslist(min_nsid);
4961 
4962     /*
4963      * Both FFFFFFFFh (NVME_NSID_BROADCAST) and FFFFFFFEh are invalid values
4964      * since the Active Namespace ID List should return namespaces with ids
4965      * *higher* than the NSID specified in the command. This is also specified
4966      * in the spec (NVM Express v1.3d, Section 5.15.4).
4967      */
4968     if (min_nsid >= NVME_NSID_BROADCAST - 1) {
4969         return NVME_INVALID_NSID | NVME_DNR;
4970     }
4971 
4972     for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
4973         ns = nvme_ns(n, i);
4974         if (!ns) {
4975             if (!active) {
4976                 ns = nvme_subsys_ns(n->subsys, i);
4977                 if (!ns) {
4978                     continue;
4979                 }
4980             } else {
4981                 continue;
4982             }
4983         }
4984         if (ns->params.nsid <= min_nsid) {
4985             continue;
4986         }
4987         list_ptr[j++] = cpu_to_le32(ns->params.nsid);
4988         if (j == data_len / sizeof(uint32_t)) {
4989             break;
4990         }
4991     }
4992 
4993     return nvme_c2h(n, list, data_len, req);
4994 }
4995 
4996 static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
4997                                          bool active)
4998 {
4999     NvmeNamespace *ns;
5000     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
5001     uint32_t min_nsid = le32_to_cpu(c->nsid);
5002     uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
5003     static const int data_len = sizeof(list);
5004     uint32_t *list_ptr = (uint32_t *)list;
5005     int i, j = 0;
5006 
5007     trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi);
5008 
5009     /*
5010      * Same as in nvme_identify_nslist(), FFFFFFFFh/FFFFFFFEh are invalid.
5011      */
5012     if (min_nsid >= NVME_NSID_BROADCAST - 1) {
5013         return NVME_INVALID_NSID | NVME_DNR;
5014     }
5015 
5016     if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) {
5017         return NVME_INVALID_FIELD | NVME_DNR;
5018     }
5019 
5020     for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
5021         ns = nvme_ns(n, i);
5022         if (!ns) {
5023             if (!active) {
5024                 ns = nvme_subsys_ns(n->subsys, i);
5025                 if (!ns) {
5026                     continue;
5027                 }
5028             } else {
5029                 continue;
5030             }
5031         }
5032         if (ns->params.nsid <= min_nsid || c->csi != ns->csi) {
5033             continue;
5034         }
5035         list_ptr[j++] = cpu_to_le32(ns->params.nsid);
5036         if (j == data_len / sizeof(uint32_t)) {
5037             break;
5038         }
5039     }
5040 
5041     return nvme_c2h(n, list, data_len, req);
5042 }
5043 
5044 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
5045 {
5046     NvmeNamespace *ns;
5047     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
5048     uint32_t nsid = le32_to_cpu(c->nsid);
5049     uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
5050     uint8_t *pos = list;
5051     struct {
5052         NvmeIdNsDescr hdr;
5053         uint8_t v[NVME_NIDL_UUID];
5054     } QEMU_PACKED uuid = {};
5055     struct {
5056         NvmeIdNsDescr hdr;
5057         uint64_t v;
5058     } QEMU_PACKED eui64 = {};
5059     struct {
5060         NvmeIdNsDescr hdr;
5061         uint8_t v;
5062     } QEMU_PACKED csi = {};
5063 
5064     trace_pci_nvme_identify_ns_descr_list(nsid);
5065 
5066     if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
5067         return NVME_INVALID_NSID | NVME_DNR;
5068     }
5069 
5070     ns = nvme_ns(n, nsid);
5071     if (unlikely(!ns)) {
5072         return NVME_INVALID_FIELD | NVME_DNR;
5073     }
5074 
5075     if (!qemu_uuid_is_null(&ns->params.uuid)) {
5076         uuid.hdr.nidt = NVME_NIDT_UUID;
5077         uuid.hdr.nidl = NVME_NIDL_UUID;
5078         memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID);
5079         memcpy(pos, &uuid, sizeof(uuid));
5080         pos += sizeof(uuid);
5081     }
5082 
5083     if (ns->params.eui64) {
5084         eui64.hdr.nidt = NVME_NIDT_EUI64;
5085         eui64.hdr.nidl = NVME_NIDL_EUI64;
5086         eui64.v = cpu_to_be64(ns->params.eui64);
5087         memcpy(pos, &eui64, sizeof(eui64));
5088         pos += sizeof(eui64);
5089     }
5090 
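         /*
          * The Command Set Identifier descriptor is reported for every
          * namespace, regardless of whether a UUID or EUI-64 descriptor is
          * present.
          */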
5091     csi.hdr.nidt = NVME_NIDT_CSI;
5092     csi.hdr.nidl = NVME_NIDL_CSI;
5093     csi.v = ns->csi;
5094     memcpy(pos, &csi, sizeof(csi));
5095     pos += sizeof(csi);
5096 
5097     return nvme_c2h(n, list, sizeof(list), req);
5098 }
5099 
5100 static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
5101 {
5102     uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
5103     static const int data_len = sizeof(list);
5104 
5105     trace_pci_nvme_identify_cmd_set();
5106 
5107     NVME_SET_CSI(*list, NVME_CSI_NVM);
5108     NVME_SET_CSI(*list, NVME_CSI_ZONED);
5109 
5110     return nvme_c2h(n, list, data_len, req);
5111 }
5112 
5113 static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
5114 {
5115     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
5116 
5117     trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid),
5118                             c->csi);
5119 
5120     switch (c->cns) {
5121     case NVME_ID_CNS_NS:
5122         return nvme_identify_ns(n, req, true);
5123     case NVME_ID_CNS_NS_PRESENT:
5124         return nvme_identify_ns(n, req, false);
5125     case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST:
5126         return nvme_identify_ctrl_list(n, req, true);
5127     case NVME_ID_CNS_CTRL_LIST:
5128         return nvme_identify_ctrl_list(n, req, false);
5129     case NVME_ID_CNS_PRIMARY_CTRL_CAP:
5130         return nvme_identify_pri_ctrl_cap(n, req);
5131     case NVME_ID_CNS_SECONDARY_CTRL_LIST:
5132         return nvme_identify_sec_ctrl_list(n, req);
5133     case NVME_ID_CNS_CS_NS:
5134         return nvme_identify_ns_csi(n, req, true);
5135     case NVME_ID_CNS_CS_NS_PRESENT:
5136         return nvme_identify_ns_csi(n, req, false);
5137     case NVME_ID_CNS_CTRL:
5138         return nvme_identify_ctrl(n, req);
5139     case NVME_ID_CNS_CS_CTRL:
5140         return nvme_identify_ctrl_csi(n, req);
5141     case NVME_ID_CNS_NS_ACTIVE_LIST:
5142         return nvme_identify_nslist(n, req, true);
5143     case NVME_ID_CNS_NS_PRESENT_LIST:
5144         return nvme_identify_nslist(n, req, false);
5145     case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
5146         return nvme_identify_nslist_csi(n, req, true);
5147     case NVME_ID_CNS_CS_NS_PRESENT_LIST:
5148         return nvme_identify_nslist_csi(n, req, false);
5149     case NVME_ID_CNS_NS_DESCR_LIST:
5150         return nvme_identify_ns_descr_list(n, req);
5151     case NVME_ID_CNS_IO_COMMAND_SET:
5152         return nvme_identify_cmd_set(n, req);
5153     default:
5154         trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
5155         return NVME_INVALID_FIELD | NVME_DNR;
5156     }
5157 }
5158 
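/*
 * Abort. The controller makes no attempt to abort the referenced command;
 * it only validates the submission queue id and reports "command not
 * aborted" by setting bit 0 of Dword 0 in the completion entry.
 */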
5159 static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
5160 {
5161     uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;
5162 
5163     req->cqe.result = 1;
5164     if (nvme_check_sqid(n, sqid)) {
5165         return NVME_INVALID_FIELD | NVME_DNR;
5166     }
5167 
5168     return NVME_SUCCESS;
5169 }
5170 
5171 static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
5172 {
5173     trace_pci_nvme_setfeat_timestamp(ts);
5174 
5175     n->host_timestamp = le64_to_cpu(ts);
5176     n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
5177 }
5178 
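/*
 * Reconstruct the current timestamp from the value last set by the host
 * plus the virtual-clock milliseconds elapsed since it was set. The result
 * is packed into the 48-bit Timestamp field; the Origin field is set to 01b
 * only if the host previously programmed a non-zero timestamp.
 */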
5179 static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
5180 {
5181     uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
5182     uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;
5183 
5184     union nvme_timestamp {
5185         struct {
5186             uint64_t timestamp:48;
5187             uint64_t sync:1;
5188             uint64_t origin:3;
5189             uint64_t rsvd1:12;
5190         };
5191         uint64_t all;
5192     };
5193 
5194     union nvme_timestamp ts;
5195     ts.all = 0;
5196     ts.timestamp = n->host_timestamp + elapsed_time;
5197 
5198     /* If the host timestamp is non-zero, set the timestamp origin */
5199     ts.origin = n->host_timestamp ? 0x01 : 0x00;
5200 
5201     trace_pci_nvme_getfeat_timestamp(ts.all);
5202 
5203     return cpu_to_le64(ts.all);
5204 }
5205 
5206 static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
5207 {
5208     uint64_t timestamp = nvme_get_timestamp(n);
5209 
5210     return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
5211 }
5212 
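/*
 * Get Features. For namespace-specific features the NSID is validated
 * first; the SEL field then selects between the current, default (also used
 * for "saved", since nothing is saveable) and capabilities values. Feature
 * identifiers without an explicit case fall back to nvme_feature_default[].
 */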
5213 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
5214 {
5215     NvmeCmd *cmd = &req->cmd;
5216     uint32_t dw10 = le32_to_cpu(cmd->cdw10);
5217     uint32_t dw11 = le32_to_cpu(cmd->cdw11);
5218     uint32_t nsid = le32_to_cpu(cmd->nsid);
5219     uint32_t result;
5220     uint8_t fid = NVME_GETSETFEAT_FID(dw10);
5221     NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
5222     uint16_t iv;
5223     NvmeNamespace *ns;
5224     int i;
5225 
5226     static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
5227         [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
5228     };
5229 
5230     trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11);
5231 
5232     if (!nvme_feature_support[fid]) {
5233         return NVME_INVALID_FIELD | NVME_DNR;
5234     }
5235 
5236     if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
5237         if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
5238             /*
5239              * The Reservation Notification Mask and Reservation Persistence
5240              * features require a status code of Invalid Field in Command when
5241              * NSID is FFFFFFFFh. Since the device does not support those
5242              * features, we can always return Invalid Namespace or Format as we
5243              * should do for all other features.
5244              */
5245             return NVME_INVALID_NSID | NVME_DNR;
5246         }
5247 
5248         if (!nvme_ns(n, nsid)) {
5249             return NVME_INVALID_FIELD | NVME_DNR;
5250         }
5251     }
5252 
5253     switch (sel) {
5254     case NVME_GETFEAT_SELECT_CURRENT:
5255         break;
5256     case NVME_GETFEAT_SELECT_SAVED:
5257         /* no features are saveable by the controller; fallthrough */
5258     case NVME_GETFEAT_SELECT_DEFAULT:
5259         goto defaults;
5260     case NVME_GETFEAT_SELECT_CAP:
5261         result = nvme_feature_cap[fid];
5262         goto out;
5263     }
5264 
5265     switch (fid) {
5266     case NVME_TEMPERATURE_THRESHOLD:
5267         result = 0;
5268 
5269         /*
5270          * The controller only implements the Composite Temperature sensor, so
5271          * return 0 for all other sensors.
5272          */
5273         if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
5274             goto out;
5275         }
5276 
5277         switch (NVME_TEMP_THSEL(dw11)) {
5278         case NVME_TEMP_THSEL_OVER:
5279             result = n->features.temp_thresh_hi;
5280             goto out;
5281         case NVME_TEMP_THSEL_UNDER:
5282             result = n->features.temp_thresh_low;
5283             goto out;
5284         }
5285 
5286         return NVME_INVALID_FIELD | NVME_DNR;
5287     case NVME_ERROR_RECOVERY:
5288         if (!nvme_nsid_valid(n, nsid)) {
5289             return NVME_INVALID_NSID | NVME_DNR;
5290         }
5291 
5292         ns = nvme_ns(n, nsid);
5293         if (unlikely(!ns)) {
5294             return NVME_INVALID_FIELD | NVME_DNR;
5295         }
5296 
5297         result = ns->features.err_rec;
5298         goto out;
5299     case NVME_VOLATILE_WRITE_CACHE:
5300         result = 0;
5301         for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
5302             ns = nvme_ns(n, i);
5303             if (!ns) {
5304                 continue;
5305             }
5306 
5307             result = blk_enable_write_cache(ns->blkconf.blk);
5308             if (result) {
5309                 break;
5310             }
5311         }
5312         trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
5313         goto out;
5314     case NVME_ASYNCHRONOUS_EVENT_CONF:
5315         result = n->features.async_config;
5316         goto out;
5317     case NVME_TIMESTAMP:
5318         return nvme_get_feature_timestamp(n, req);
5319     case NVME_HOST_BEHAVIOR_SUPPORT:
5320         return nvme_c2h(n, (uint8_t *)&n->features.hbs,
5321                         sizeof(n->features.hbs), req);
5322     default:
5323         break;
5324     }
5325 
5326 defaults:
5327     switch (fid) {
5328     case NVME_TEMPERATURE_THRESHOLD:
5329         result = 0;
5330 
5331         if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
5332             break;
5333         }
5334 
5335         if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
5336             result = NVME_TEMPERATURE_WARNING;
5337         }
5338 
5339         break;
5340     case NVME_NUMBER_OF_QUEUES:
5341         result = (n->conf_ioqpairs - 1) | ((n->conf_ioqpairs - 1) << 16);
5342         trace_pci_nvme_getfeat_numq(result);
5343         break;
5344     case NVME_INTERRUPT_VECTOR_CONF:
5345         iv = dw11 & 0xffff;
5346         if (iv >= n->conf_ioqpairs + 1) {
5347             return NVME_INVALID_FIELD | NVME_DNR;
5348         }
5349 
5350         result = iv;
5351         if (iv == n->admin_cq.vector) {
5352             result |= NVME_INTVC_NOCOALESCING;
5353         }
5354         break;
5355     default:
5356         result = nvme_feature_default[fid];
5357         break;
5358     }
5359 
5360 out:
5361     req->cqe.result = cpu_to_le32(result);
5362     return NVME_SUCCESS;
5363 }
5364 
5365 static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
5366 {
5367     uint16_t ret;
5368     uint64_t timestamp;
5369 
5370     ret = nvme_h2c(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
5371     if (ret) {
5372         return ret;
5373     }
5374 
5375     nvme_set_timestamp(n, timestamp);
5376 
5377     return NVME_SUCCESS;
5378 }
5379 
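/*
 * Set Features. After validating saveability, feature support, namespace
 * scope and changeability, the individual feature handlers update either
 * controller-wide state (n->features) or per-namespace state, honouring the
 * broadcast NSID where the feature allows it.
 */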
5380 static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
5381 {
5382     NvmeNamespace *ns = NULL;
5383 
5384     NvmeCmd *cmd = &req->cmd;
5385     uint32_t dw10 = le32_to_cpu(cmd->cdw10);
5386     uint32_t dw11 = le32_to_cpu(cmd->cdw11);
5387     uint32_t nsid = le32_to_cpu(cmd->nsid);
5388     uint8_t fid = NVME_GETSETFEAT_FID(dw10);
5389     uint8_t save = NVME_SETFEAT_SAVE(dw10);
5390     uint16_t status;
5391     int i;
5392 
5393     trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11);
5394 
5395     if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) {
5396         return NVME_FID_NOT_SAVEABLE | NVME_DNR;
5397     }
5398 
5399     if (!nvme_feature_support[fid]) {
5400         return NVME_INVALID_FIELD | NVME_DNR;
5401     }
5402 
5403     if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
5404         if (nsid != NVME_NSID_BROADCAST) {
5405             if (!nvme_nsid_valid(n, nsid)) {
5406                 return NVME_INVALID_NSID | NVME_DNR;
5407             }
5408 
5409             ns = nvme_ns(n, nsid);
5410             if (unlikely(!ns)) {
5411                 return NVME_INVALID_FIELD | NVME_DNR;
5412             }
5413         }
5414     } else if (nsid && nsid != NVME_NSID_BROADCAST) {
5415         if (!nvme_nsid_valid(n, nsid)) {
5416             return NVME_INVALID_NSID | NVME_DNR;
5417         }
5418 
5419         return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
5420     }
5421 
5422     if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
5423         return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
5424     }
5425 
5426     switch (fid) {
5427     case NVME_TEMPERATURE_THRESHOLD:
5428         if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
5429             break;
5430         }
5431 
5432         switch (NVME_TEMP_THSEL(dw11)) {
5433         case NVME_TEMP_THSEL_OVER:
5434             n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
5435             break;
5436         case NVME_TEMP_THSEL_UNDER:
5437             n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
5438             break;
5439         default:
5440             return NVME_INVALID_FIELD | NVME_DNR;
5441         }
5442 
5443         if ((n->temperature >= n->features.temp_thresh_hi) ||
5444             (n->temperature <= n->features.temp_thresh_low)) {
5445             nvme_smart_event(n, NVME_SMART_TEMPERATURE);
5446         }
5447 
5448         break;
5449     case NVME_ERROR_RECOVERY:
5450         if (nsid == NVME_NSID_BROADCAST) {
5451             for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
5452                 ns = nvme_ns(n, i);
5453 
5454                 if (!ns) {
5455                     continue;
5456                 }
5457 
5458                 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
5459                     ns->features.err_rec = dw11;
5460                 }
5461             }
5462 
5463             break;
5464         }
5465 
5466         assert(ns);
5467         if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat))  {
5468             ns->features.err_rec = dw11;
5469         }
5470         break;
5471     case NVME_VOLATILE_WRITE_CACHE:
5472         for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
5473             ns = nvme_ns(n, i);
5474             if (!ns) {
5475                 continue;
5476             }
5477 
5478             if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) {
5479                 blk_flush(ns->blkconf.blk);
5480             }
5481 
5482             blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1);
5483         }
5484 
5485         break;
5486 
5487     case NVME_NUMBER_OF_QUEUES:
5488         if (n->qs_created) {
5489             return NVME_CMD_SEQ_ERROR | NVME_DNR;
5490         }
5491 
5492         /*
5493          * NVMe v1.3, Section 5.21.1.7: FFFFh is not an allowed value for NCQR
5494          * and NSQR.
5495          */
5496         if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
5497             return NVME_INVALID_FIELD | NVME_DNR;
5498         }
5499 
5500         trace_pci_nvme_setfeat_numq((dw11 & 0xffff) + 1,
5501                                     ((dw11 >> 16) & 0xffff) + 1,
5502                                     n->conf_ioqpairs,
5503                                     n->conf_ioqpairs);
5504         req->cqe.result = cpu_to_le32((n->conf_ioqpairs - 1) |
5505                                       ((n->conf_ioqpairs - 1) << 16));
5506         break;
5507     case NVME_ASYNCHRONOUS_EVENT_CONF:
5508         n->features.async_config = dw11;
5509         break;
5510     case NVME_TIMESTAMP:
5511         return nvme_set_feature_timestamp(n, req);
5512     case NVME_HOST_BEHAVIOR_SUPPORT:
5513         status = nvme_h2c(n, (uint8_t *)&n->features.hbs,
5514                           sizeof(n->features.hbs), req);
5515         if (status) {
5516             return status;
5517         }
5518 
5519         for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
5520             ns = nvme_ns(n, i);
5521 
5522             if (!ns) {
5523                 continue;
5524             }
5525 
5526             ns->id_ns.nlbaf = ns->nlbaf - 1;
5527             if (!n->features.hbs.lbafee) {
5528                 ns->id_ns.nlbaf = MIN(ns->id_ns.nlbaf, 15);
5529             }
5530         }
5531 
5532         return status;
5533     case NVME_COMMAND_SET_PROFILE:
5534         if (dw11 & 0x1ff) {
5535             trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff);
5536             return NVME_CMD_SET_CMB_REJECTED | NVME_DNR;
5537         }
5538         break;
5539     default:
5540         return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
5541     }
5542     return NVME_SUCCESS;
5543 }
5544 
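/*
 * Asynchronous Event Request. The request is parked in aer_reqs until an
 * event is available; if events are already queued, they are delivered
 * immediately via nvme_process_aers(). Returns NVME_NO_COMPLETE so that no
 * completion entry is posted at this point.
 */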
5545 static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
5546 {
5547     trace_pci_nvme_aer(nvme_cid(req));
5548 
5549     if (n->outstanding_aers > n->params.aerl) {
5550         trace_pci_nvme_aer_aerl_exceeded();
5551         return NVME_AER_LIMIT_EXCEEDED;
5552     }
5553 
5554     n->aer_reqs[n->outstanding_aers] = req;
5555     n->outstanding_aers++;
5556 
5557     if (!QTAILQ_EMPTY(&n->aer_queue)) {
5558         nvme_process_aers(n);
5559     }
5560 
5561     return NVME_NO_COMPLETE;
5562 }
5563 
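/*
 * Recompute DMRSL (Dataset Management Range Size Limit) as the smallest
 * per-namespace limit, expressed in logical blocks, that keeps a single
 * range within the block layer's maximum request size.
 */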
5564 static void nvme_update_dmrsl(NvmeCtrl *n)
5565 {
5566     int nsid;
5567 
5568     for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) {
5569         NvmeNamespace *ns = nvme_ns(n, nsid);
5570         if (!ns) {
5571             continue;
5572         }
5573 
5574         n->dmrsl = MIN_NON_ZERO(n->dmrsl,
5575                                 BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
5576     }
5577 }
5578 
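/*
 * Select the effective I/O command set for a namespace based on the
 * namespace CSI and the command set selected in CC.CSS: zoned namespaces
 * get the zoned command set only when all supported command sets are
 * enabled, and fall back to the plain NVM command set when CC.CSS selects
 * NVM only.
 */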
5579 static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
5580 {
5581     uint32_t cc = ldl_le_p(&n->bar.cc);
5582 
5583     ns->iocs = nvme_cse_iocs_none;
5584     switch (ns->csi) {
5585     case NVME_CSI_NVM:
5586         if (NVME_CC_CSS(cc) != NVME_CC_CSS_ADMIN_ONLY) {
5587             ns->iocs = nvme_cse_iocs_nvm;
5588         }
5589         break;
5590     case NVME_CSI_ZONED:
5591         if (NVME_CC_CSS(cc) == NVME_CC_CSS_CSI) {
5592             ns->iocs = nvme_cse_iocs_zoned;
5593         } else if (NVME_CC_CSS(cc) == NVME_CC_CSS_NVM) {
5594             ns->iocs = nvme_cse_iocs_nvm;
5595         }
5596         break;
5597     }
5598 }
5599 
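/*
 * Namespace Attachment. The host-supplied controller list is read into a
 * bounded buffer; for each listed controller the namespace is either
 * attached (rejecting private namespaces that are already attached
 * elsewhere) or detached, and a Namespace Attribute Changed event is queued
 * for controllers that do not already have the nsid marked as changed.
 */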
5600 static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
5601 {
5602     NvmeNamespace *ns;
5603     NvmeCtrl *ctrl;
5604     uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
5605     uint32_t nsid = le32_to_cpu(req->cmd.nsid);
5606     uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
5607     uint8_t sel = dw10 & 0xf;
5608     uint16_t *nr_ids = &list[0];
5609     uint16_t *ids = &list[1];
5610     uint16_t ret;
5611     int i;
5612 
5613     trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);
5614 
5615     if (!nvme_nsid_valid(n, nsid)) {
5616         return NVME_INVALID_NSID | NVME_DNR;
5617     }
5618 
5619     ns = nvme_subsys_ns(n->subsys, nsid);
5620     if (!ns) {
5621         return NVME_INVALID_FIELD | NVME_DNR;
5622     }
5623 
5624     ret = nvme_h2c(n, (uint8_t *)list, 4096, req);
5625     if (ret) {
5626         return ret;
5627     }
5628 
5629     if (!*nr_ids) {
5630         return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
5631     }
5632 
5633     *nr_ids = MIN(*nr_ids, NVME_CONTROLLER_LIST_SIZE - 1);
5634     for (i = 0; i < *nr_ids; i++) {
5635         ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
5636         if (!ctrl) {
5637             return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
5638         }
5639 
5640         switch (sel) {
5641         case NVME_NS_ATTACHMENT_ATTACH:
5642             if (nvme_ns(ctrl, nsid)) {
5643                 return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
5644             }
5645 
5646             if (ns->attached && !ns->params.shared) {
5647                 return NVME_NS_PRIVATE | NVME_DNR;
5648             }
5649 
5650             nvme_attach_ns(ctrl, ns);
5651             nvme_select_iocs_ns(ctrl, ns);
5652 
5653             break;
5654 
5655         case NVME_NS_ATTACHMENT_DETACH:
5656             if (!nvme_ns(ctrl, nsid)) {
5657                 return NVME_NS_NOT_ATTACHED | NVME_DNR;
5658             }
5659 
5660             ctrl->namespaces[nsid] = NULL;
5661             ns->attached--;
5662 
5663             nvme_update_dmrsl(ctrl);
5664 
5665             break;
5666 
5667         default:
5668             return NVME_INVALID_FIELD | NVME_DNR;
5669         }
5670 
5671         /*
5672          * Add namespace id to the changed namespace id list for event clearing
5673          * via Get Log Page command.
5674          */
5675         if (!test_and_set_bit(nsid, ctrl->changed_nsids)) {
5676             nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE,
5677                                NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED,
5678                                NVME_LOG_CHANGED_NSLIST);
5679         }
5680     }
5681 
5682     return NVME_SUCCESS;
5683 }
5684 
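/*
 * Format NVM is implemented as an asynchronous state machine: nvme_format()
 * sets up the AIOCB, nvme_do_format() picks the (next) namespace to format
 * (iterating over all namespaces for the broadcast NSID), and
 * nvme_format_ns_cb() zeroes the namespace in BDRV_REQUEST_MAX_BYTES sized
 * chunks before applying the new LBA format via nvme_format_set().
 */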
5685 typedef struct NvmeFormatAIOCB {
5686     BlockAIOCB common;
5687     BlockAIOCB *aiocb;
5688     NvmeRequest *req;
5689     int ret;
5690 
5691     NvmeNamespace *ns;
5692     uint32_t nsid;
5693     bool broadcast;
5694     int64_t offset;
5695 
5696     uint8_t lbaf;
5697     uint8_t mset;
5698     uint8_t pi;
5699     uint8_t pil;
5700 } NvmeFormatAIOCB;
5701 
5702 static void nvme_format_cancel(BlockAIOCB *aiocb)
5703 {
5704     NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common);
5705 
5706     iocb->ret = -ECANCELED;
5707 
5708     if (iocb->aiocb) {
5709         blk_aio_cancel_async(iocb->aiocb);
5710         iocb->aiocb = NULL;
5711     }
5712 }
5713 
5714 static const AIOCBInfo nvme_format_aiocb_info = {
5715     .aiocb_size = sizeof(NvmeFormatAIOCB),
5716     .cancel_async = nvme_format_cancel,
5717     .get_aio_context = nvme_get_aio_context,
5718 };
5719 
5720 static void nvme_format_set(NvmeNamespace *ns, uint8_t lbaf, uint8_t mset,
5721                             uint8_t pi, uint8_t pil)
5722 {
5723     uint8_t lbafl = lbaf & 0xf;
5724     uint8_t lbafu = lbaf >> 4;
5725 
5726     trace_pci_nvme_format_set(ns->params.nsid, lbaf, mset, pi, pil);
5727 
5728     ns->id_ns.dps = (pil << 3) | pi;
5729     ns->id_ns.flbas = (lbafu << 5) | (mset << 4) | lbafl;
5730 
5731     nvme_ns_init_format(ns);
5732 }
5733 
5734 static void nvme_do_format(NvmeFormatAIOCB *iocb);
5735 
5736 static void nvme_format_ns_cb(void *opaque, int ret)
5737 {
5738     NvmeFormatAIOCB *iocb = opaque;
5739     NvmeNamespace *ns = iocb->ns;
5740     int bytes;
5741 
5742     if (iocb->ret < 0) {
5743         goto done;
5744     } else if (ret < 0) {
5745         iocb->ret = ret;
5746         goto done;
5747     }
5748 
5749     assert(ns);
5750 
5751     if (iocb->offset < ns->size) {
5752         bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset);
5753 
5754         iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset,
5755                                             bytes, BDRV_REQ_MAY_UNMAP,
5756                                             nvme_format_ns_cb, iocb);
5757 
5758         iocb->offset += bytes;
5759         return;
5760     }
5761 
5762     nvme_format_set(ns, iocb->lbaf, iocb->mset, iocb->pi, iocb->pil);
5763     ns->status = 0x0;
5764     iocb->ns = NULL;
5765     iocb->offset = 0;
5766 
5767 done:
5768     nvme_do_format(iocb);
5769 }
5770 
5771 static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi)
5772 {
5773     if (ns->params.zoned) {
5774         return NVME_INVALID_FORMAT | NVME_DNR;
5775     }
5776 
5777     if (lbaf > ns->id_ns.nlbaf) {
5778         return NVME_INVALID_FORMAT | NVME_DNR;
5779     }
5780 
5781     if (pi && (ns->id_ns.lbaf[lbaf].ms < nvme_pi_tuple_size(ns))) {
5782         return NVME_INVALID_FORMAT | NVME_DNR;
5783     }
5784 
5785     if (pi && pi > NVME_ID_NS_DPS_TYPE_3) {
5786         return NVME_INVALID_FIELD | NVME_DNR;
5787     }
5788 
5789     return NVME_SUCCESS;
5790 }
5791 
5792 static void nvme_do_format(NvmeFormatAIOCB *iocb)
5793 {
5794     NvmeRequest *req = iocb->req;
5795     NvmeCtrl *n = nvme_ctrl(req);
5796     uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
5797     uint8_t lbaf = dw10 & 0xf;
5798     uint8_t pi = (dw10 >> 5) & 0x7;
5799     uint16_t status;
5800     int i;
5801 
5802     if (iocb->ret < 0) {
5803         goto done;
5804     }
5805 
5806     if (iocb->broadcast) {
5807         for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) {
5808             iocb->ns = nvme_ns(n, i);
5809             if (iocb->ns) {
5810                 iocb->nsid = i;
5811                 break;
5812             }
5813         }
5814     }
5815 
5816     if (!iocb->ns) {
5817         goto done;
5818     }
5819 
5820     status = nvme_format_check(iocb->ns, lbaf, pi);
5821     if (status) {
5822         req->status = status;
5823         goto done;
5824     }
5825 
5826     iocb->ns->status = NVME_FORMAT_IN_PROGRESS;
5827     nvme_format_ns_cb(iocb, 0);
5828     return;
5829 
5830 done:
5831     iocb->common.cb(iocb->common.opaque, iocb->ret);
5832     qemu_aio_unref(iocb);
5833 }
5834 
5835 static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req)
5836 {
5837     NvmeFormatAIOCB *iocb;
5838     uint32_t nsid = le32_to_cpu(req->cmd.nsid);
5839     uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
5840     uint8_t lbaf = dw10 & 0xf;
5841     uint8_t mset = (dw10 >> 4) & 0x1;
5842     uint8_t pi = (dw10 >> 5) & 0x7;
5843     uint8_t pil = (dw10 >> 8) & 0x1;
5844     uint8_t lbafu = (dw10 >> 12) & 0x3;
5845     uint16_t status;
5846 
5847     iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req);
5848 
5849     iocb->req = req;
5850     iocb->ret = 0;
5851     iocb->ns = NULL;
5852     iocb->nsid = 0;
5853     iocb->lbaf = lbaf;
5854     iocb->mset = mset;
5855     iocb->pi = pi;
5856     iocb->pil = pil;
5857     iocb->broadcast = (nsid == NVME_NSID_BROADCAST);
5858     iocb->offset = 0;
5859 
5860     if (n->features.hbs.lbafee) {
5861         iocb->lbaf |= lbafu << 4;
5862     }
5863 
5864     if (!iocb->broadcast) {
5865         if (!nvme_nsid_valid(n, nsid)) {
5866             status = NVME_INVALID_NSID | NVME_DNR;
5867             goto out;
5868         }
5869 
5870         iocb->ns = nvme_ns(n, nsid);
5871         if (!iocb->ns) {
5872             status = NVME_INVALID_FIELD | NVME_DNR;
5873             goto out;
5874         }
5875     }
5876 
5877     req->aiocb = &iocb->common;
5878     nvme_do_format(iocb);
5879 
5880     return NVME_NO_COMPLETE;
5881 
5882 out:
5883     qemu_aio_unref(iocb);
5884 
5885     return status;
5886 }
5887 
5888 static void nvme_get_virt_res_num(NvmeCtrl *n, uint8_t rt, int *num_total,
5889                                   int *num_prim, int *num_sec)
5890 {
5891     *num_total = le32_to_cpu(rt ?
5892                              n->pri_ctrl_cap.vifrt : n->pri_ctrl_cap.vqfrt);
5893     *num_prim = le16_to_cpu(rt ?
5894                             n->pri_ctrl_cap.virfap : n->pri_ctrl_cap.vqrfap);
5895     *num_sec = le16_to_cpu(rt ? n->pri_ctrl_cap.virfa : n->pri_ctrl_cap.vqrfa);
5896 }
5897 
5898 static uint16_t nvme_assign_virt_res_to_prim(NvmeCtrl *n, NvmeRequest *req,
5899                                              uint16_t cntlid, uint8_t rt,
5900                                              int nr)
5901 {
5902     int num_total, num_prim, num_sec;
5903 
5904     if (cntlid != n->cntlid) {
5905         return NVME_INVALID_CTRL_ID | NVME_DNR;
5906     }
5907 
5908     nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec);
5909 
5910     if (nr > num_total) {
5911         return NVME_INVALID_NUM_RESOURCES | NVME_DNR;
5912     }
5913 
5914     if (nr > num_total - num_sec) {
5915         return NVME_INVALID_RESOURCE_ID | NVME_DNR;
5916     }
5917 
5918     if (rt) {
5919         n->next_pri_ctrl_cap.virfap = cpu_to_le16(nr);
5920     } else {
5921         n->next_pri_ctrl_cap.vqrfap = cpu_to_le16(nr);
5922     }
5923 
5924     req->cqe.result = cpu_to_le32(nr);
5925     return req->status;
5926 }
5927 
5928 static void nvme_update_virt_res(NvmeCtrl *n, NvmeSecCtrlEntry *sctrl,
5929                                  uint8_t rt, int nr)
5930 {
5931     int prev_nr, prev_total;
5932 
5933     if (rt) {
5934         prev_nr = le16_to_cpu(sctrl->nvi);
5935         prev_total = le32_to_cpu(n->pri_ctrl_cap.virfa);
5936         sctrl->nvi = cpu_to_le16(nr);
5937         n->pri_ctrl_cap.virfa = cpu_to_le32(prev_total + nr - prev_nr);
5938     } else {
5939         prev_nr = le16_to_cpu(sctrl->nvq);
5940         prev_total = le32_to_cpu(n->pri_ctrl_cap.vqrfa);
5941         sctrl->nvq = cpu_to_le16(nr);
5942         n->pri_ctrl_cap.vqrfa = cpu_to_le32(prev_total + nr - prev_nr);
5943     }
5944 }
5945 
5946 static uint16_t nvme_assign_virt_res_to_sec(NvmeCtrl *n, NvmeRequest *req,
5947                                             uint16_t cntlid, uint8_t rt, int nr)
5948 {
5949     int num_total, num_prim, num_sec, num_free, diff, limit;
5950     NvmeSecCtrlEntry *sctrl;
5951 
5952     sctrl = nvme_sctrl_for_cntlid(n, cntlid);
5953     if (!sctrl) {
5954         return NVME_INVALID_CTRL_ID | NVME_DNR;
5955     }
5956 
5957     if (sctrl->scs) {
5958         return NVME_INVALID_SEC_CTRL_STATE | NVME_DNR;
5959     }
5960 
5961     limit = le16_to_cpu(rt ? n->pri_ctrl_cap.vifrsm : n->pri_ctrl_cap.vqfrsm);
5962     if (nr > limit) {
5963         return NVME_INVALID_NUM_RESOURCES | NVME_DNR;
5964     }
5965 
5966     nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec);
5967     num_free = num_total - num_prim - num_sec;
5968     diff = nr - le16_to_cpu(rt ? sctrl->nvi : sctrl->nvq);
5969 
5970     if (diff > num_free) {
5971         return NVME_INVALID_RESOURCE_ID | NVME_DNR;
5972     }
5973 
5974     nvme_update_virt_res(n, sctrl, rt, nr);
5975     req->cqe.result = cpu_to_le32(nr);
5976 
5977     return req->status;
5978 }
5979 
5980 static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online)
5981 {
5982     PCIDevice *pci = PCI_DEVICE(n);
5983     NvmeCtrl *sn = NULL;
5984     NvmeSecCtrlEntry *sctrl;
5985     int vf_index;
5986 
5987     sctrl = nvme_sctrl_for_cntlid(n, cntlid);
5988     if (!sctrl) {
5989         return NVME_INVALID_CTRL_ID | NVME_DNR;
5990     }
5991 
5992     if (!pci_is_vf(pci)) {
5993         vf_index = le16_to_cpu(sctrl->vfn) - 1;
5994         sn = NVME(pcie_sriov_get_vf_at_index(pci, vf_index));
5995     }
5996 
5997     if (online) {
5998         if (!sctrl->nvi || (le16_to_cpu(sctrl->nvq) < 2) || !sn) {
5999             return NVME_INVALID_SEC_CTRL_STATE | NVME_DNR;
6000         }
6001 
6002         if (!sctrl->scs) {
6003             sctrl->scs = 0x1;
6004             nvme_ctrl_reset(sn, NVME_RESET_FUNCTION);
6005         }
6006     } else {
6007         nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_INTERRUPT, 0);
6008         nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_QUEUE, 0);
6009 
6010         if (sctrl->scs) {
6011             sctrl->scs = 0x0;
6012             if (sn) {
6013                 nvme_ctrl_reset(sn, NVME_RESET_FUNCTION);
6014             }
6015         }
6016     }
6017 
6018     return NVME_SUCCESS;
6019 }
6020 
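/*
 * Virtualization Management. Depending on the action, flexible queue (VQ)
 * or interrupt (VI) resources are assigned to the primary controller
 * (recorded in next_pri_ctrl_cap and applied on a subsequent reset) or to a
 * secondary controller, or a secondary controller is transitioned
 * online/offline.
 */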
6021 static uint16_t nvme_virt_mngmt(NvmeCtrl *n, NvmeRequest *req)
6022 {
6023     uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
6024     uint32_t dw11 = le32_to_cpu(req->cmd.cdw11);
6025     uint8_t act = dw10 & 0xf;
6026     uint8_t rt = (dw10 >> 8) & 0x7;
6027     uint16_t cntlid = (dw10 >> 16) & 0xffff;
6028     int nr = dw11 & 0xffff;
6029 
6030     trace_pci_nvme_virt_mngmt(nvme_cid(req), act, cntlid, rt ? "VI" : "VQ", nr);
6031 
6032     if (rt != NVME_VIRT_RES_QUEUE && rt != NVME_VIRT_RES_INTERRUPT) {
6033         return NVME_INVALID_RESOURCE_ID | NVME_DNR;
6034     }
6035 
6036     switch (act) {
6037     case NVME_VIRT_MNGMT_ACTION_SEC_ASSIGN:
6038         return nvme_assign_virt_res_to_sec(n, req, cntlid, rt, nr);
6039     case NVME_VIRT_MNGMT_ACTION_PRM_ALLOC:
6040         return nvme_assign_virt_res_to_prim(n, req, cntlid, rt, nr);
6041     case NVME_VIRT_MNGMT_ACTION_SEC_ONLINE:
6042         return nvme_virt_set_state(n, cntlid, true);
6043     case NVME_VIRT_MNGMT_ACTION_SEC_OFFLINE:
6044         return nvme_virt_set_state(n, cntlid, false);
6045     default:
6046         return NVME_INVALID_FIELD | NVME_DNR;
6047     }
6048 }
6049 
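/*
 * Doorbell Buffer Config. The two PRP entries carry the page-aligned shadow
 * doorbell and eventidx buffer addresses; these are recorded and applied to
 * every existing queue pair (admin queues included), with ioeventfd
 * notifiers enabled for I/O queues when configured.
 */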
6050 static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
6051 {
6052     PCIDevice *pci = PCI_DEVICE(n);
6053     uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1);
6054     uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2);
6055     int i;
6056 
6057     /* Address should be page aligned */
6058     if (dbs_addr & (n->page_size - 1) || eis_addr & (n->page_size - 1)) {
6059         return NVME_INVALID_FIELD | NVME_DNR;
6060     }
6061 
6062     /* Save shadow buffer base addr for use during queue creation */
6063     n->dbbuf_dbs = dbs_addr;
6064     n->dbbuf_eis = eis_addr;
6065     n->dbbuf_enabled = true;
6066 
6067     for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
6068         NvmeSQueue *sq = n->sq[i];
6069         NvmeCQueue *cq = n->cq[i];
6070 
6071         if (sq) {
6072             /*
6073              * CAP.DSTRD is 0, so offset of ith sq db_addr is (i<<3).
6074              * nvme_process_db() uses this hard-coded way to calculate
6075              * doorbell offsets. Be consistent with that here.
6076              */
6077             sq->db_addr = dbs_addr + (i << 3);
6078             sq->ei_addr = eis_addr + (i << 3);
6079             pci_dma_write(pci, sq->db_addr, &sq->tail, sizeof(sq->tail));
6080 
6081             if (n->params.ioeventfd && sq->sqid != 0) {
6082                 if (!nvme_init_sq_ioeventfd(sq)) {
6083                     sq->ioeventfd_enabled = true;
6084                 }
6085             }
6086         }
6087 
6088         if (cq) {
6089             /* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */
6090             cq->db_addr = dbs_addr + (i << 3) + (1 << 2);
6091             cq->ei_addr = eis_addr + (i << 3) + (1 << 2);
6092             pci_dma_write(pci, cq->db_addr, &cq->head, sizeof(cq->head));
6093 
6094             if (n->params.ioeventfd && cq->cqid != 0) {
6095                 if (!nvme_init_cq_ioeventfd(cq)) {
6096                     cq->ioeventfd_enabled = true;
6097                 }
6098             }
6099         }
6100     }
6101 
6102     trace_pci_nvme_dbbuf_config(dbs_addr, eis_addr);
6103 
6104     return NVME_SUCCESS;
6105 }
6106 
6107 static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
6108 {
6109     trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
6110                              nvme_adm_opc_str(req->cmd.opcode));
6111 
6112     if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
6113         trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
6114         return NVME_INVALID_OPCODE | NVME_DNR;
6115     }
6116 
6117     /* SGLs shall not be used for Admin commands in NVMe over PCIe */
6118     if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) {
6119         return NVME_INVALID_FIELD | NVME_DNR;
6120     }
6121 
6122     if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) {
6123         return NVME_INVALID_FIELD;
6124     }
6125 
6126     switch (req->cmd.opcode) {
6127     case NVME_ADM_CMD_DELETE_SQ:
6128         return nvme_del_sq(n, req);
6129     case NVME_ADM_CMD_CREATE_SQ:
6130         return nvme_create_sq(n, req);
6131     case NVME_ADM_CMD_GET_LOG_PAGE:
6132         return nvme_get_log(n, req);
6133     case NVME_ADM_CMD_DELETE_CQ:
6134         return nvme_del_cq(n, req);
6135     case NVME_ADM_CMD_CREATE_CQ:
6136         return nvme_create_cq(n, req);
6137     case NVME_ADM_CMD_IDENTIFY:
6138         return nvme_identify(n, req);
6139     case NVME_ADM_CMD_ABORT:
6140         return nvme_abort(n, req);
6141     case NVME_ADM_CMD_SET_FEATURES:
6142         return nvme_set_feature(n, req);
6143     case NVME_ADM_CMD_GET_FEATURES:
6144         return nvme_get_feature(n, req);
6145     case NVME_ADM_CMD_ASYNC_EV_REQ:
6146         return nvme_aer(n, req);
6147     case NVME_ADM_CMD_NS_ATTACHMENT:
6148         return nvme_ns_attachment(n, req);
6149     case NVME_ADM_CMD_VIRT_MNGMT:
6150         return nvme_virt_mngmt(n, req);
6151     case NVME_ADM_CMD_DBBUF_CONFIG:
6152         return nvme_dbbuf_config(n, req);
6153     case NVME_ADM_CMD_FORMAT_NVM:
6154         return nvme_format(n, req);
6155     default:
6156         assert(false);
6157     }
6158 
6159     return NVME_INVALID_OPCODE | NVME_DNR;
6160 }
6161 
6162 static void nvme_update_sq_eventidx(const NvmeSQueue *sq)
6163 {
6164     uint32_t v = cpu_to_le32(sq->tail);
6165 
6166     trace_pci_nvme_update_sq_eventidx(sq->sqid, sq->tail);
6167 
6168     pci_dma_write(PCI_DEVICE(sq->ctrl), sq->ei_addr, &v, sizeof(v));
6169 }
6170 
6171 static void nvme_update_sq_tail(NvmeSQueue *sq)
6172 {
6173     uint32_t v;
6174 
6175     pci_dma_read(PCI_DEVICE(sq->ctrl), sq->db_addr, &v, sizeof(v));
6176 
6177     sq->tail = le32_to_cpu(v);
6178 
6179     trace_pci_nvme_update_sq_tail(sq->sqid, sq->tail);
6180 }
6181 
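/*
 * Submission queue processing (bottom half). Commands are fetched from the
 * queue one at a time and dispatched to the admin or I/O handler; any
 * synchronous status is turned into a completion entry immediately, and the
 * shadow doorbell tail and eventidx values are refreshed when doorbell
 * buffers are enabled. A failed command fetch marks the controller as
 * failed (CSTS.CFS).
 */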
6182 static void nvme_process_sq(void *opaque)
6183 {
6184     NvmeSQueue *sq = opaque;
6185     NvmeCtrl *n = sq->ctrl;
6186     NvmeCQueue *cq = n->cq[sq->cqid];
6187 
6188     uint16_t status;
6189     hwaddr addr;
6190     NvmeCmd cmd;
6191     NvmeRequest *req;
6192 
6193     if (n->dbbuf_enabled) {
6194         nvme_update_sq_tail(sq);
6195     }
6196 
6197     while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
6198         addr = sq->dma_addr + sq->head * n->sqe_size;
6199         if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
6200             trace_pci_nvme_err_addr_read(addr);
6201             trace_pci_nvme_err_cfs();
6202             stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
6203             break;
6204         }
6205         nvme_inc_sq_head(sq);
6206 
6207         req = QTAILQ_FIRST(&sq->req_list);
6208         QTAILQ_REMOVE(&sq->req_list, req, entry);
6209         QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
6210         nvme_req_clear(req);
6211         req->cqe.cid = cmd.cid;
6212         memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));
6213 
6214         status = sq->sqid ? nvme_io_cmd(n, req) :
6215             nvme_admin_cmd(n, req);
6216         if (status != NVME_NO_COMPLETE) {
6217             req->status = status;
6218             nvme_enqueue_req_completion(cq, req);
6219         }
6220 
6221         if (n->dbbuf_enabled) {
6222             nvme_update_sq_eventidx(sq);
6223             nvme_update_sq_tail(sq);
6224         }
6225     }
6226 }
6227 
6228 static void nvme_update_msixcap_ts(PCIDevice *pci_dev, uint32_t table_size)
6229 {
6230     uint8_t *config;
6231 
6232     if (!msix_present(pci_dev)) {
6233         return;
6234     }
6235 
6236     assert(table_size > 0 && table_size <= pci_dev->msix_entries_nr);
6237 
6238     config = pci_dev->config + pci_dev->msix_cap;
6239     pci_set_word_by_mask(config + PCI_MSIX_FLAGS, PCI_MSIX_FLAGS_QSIZE,
6240                          table_size - 1);
6241 }
6242 
6243 static void nvme_activate_virt_res(NvmeCtrl *n)
6244 {
6245     PCIDevice *pci_dev = PCI_DEVICE(n);
6246     NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
6247     NvmeSecCtrlEntry *sctrl;
6248 
6249     /* -1 to account for the admin queue */
6250     if (pci_is_vf(pci_dev)) {
6251         sctrl = nvme_sctrl(n);
6252         cap->vqprt = sctrl->nvq;
6253         cap->viprt = sctrl->nvi;
6254         n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0;
6255         n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1;
6256     } else {
6257         cap->vqrfap = n->next_pri_ctrl_cap.vqrfap;
6258         cap->virfap = n->next_pri_ctrl_cap.virfap;
6259         n->conf_ioqpairs = le16_to_cpu(cap->vqprt) +
6260                            le16_to_cpu(cap->vqrfap) - 1;
6261         n->conf_msix_qsize = le16_to_cpu(cap->viprt) +
6262                              le16_to_cpu(cap->virfap);
6263     }
6264 }
6265 
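/*
 * Controller reset: drain all namespaces, tear down every submission and
 * completion queue and drop pending asynchronous events. For SR-IOV setups
 * the PF additionally offlines its secondary controllers, and resets other
 * than a plain controller reset disable the VFs and activate any newly
 * assigned flexible resources. CSTS is cleared, or set to FAILED for a VF
 * whose secondary controller is offline.
 */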
6266 static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst)
6267 {
6268     PCIDevice *pci_dev = PCI_DEVICE(n);
6269     NvmeSecCtrlEntry *sctrl;
6270     NvmeNamespace *ns;
6271     int i;
6272 
6273     for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
6274         ns = nvme_ns(n, i);
6275         if (!ns) {
6276             continue;
6277         }
6278 
6279         nvme_ns_drain(ns);
6280     }
6281 
6282     for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
6283         if (n->sq[i] != NULL) {
6284             nvme_free_sq(n->sq[i], n);
6285         }
6286     }
6287     for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
6288         if (n->cq[i] != NULL) {
6289             nvme_free_cq(n->cq[i], n);
6290         }
6291     }
6292 
6293     while (!QTAILQ_EMPTY(&n->aer_queue)) {
6294         NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
6295         QTAILQ_REMOVE(&n->aer_queue, event, entry);
6296         g_free(event);
6297     }
6298 
6299     if (n->params.sriov_max_vfs) {
6300         if (!pci_is_vf(pci_dev)) {
6301             for (i = 0; i < n->sec_ctrl_list.numcntl; i++) {
6302                 sctrl = &n->sec_ctrl_list.sec[i];
6303                 nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false);
6304             }
6305 
6306             if (rst != NVME_RESET_CONTROLLER) {
6307                 pcie_sriov_pf_disable_vfs(pci_dev);
6308             }
6309         }
6310 
6311         if (rst != NVME_RESET_CONTROLLER) {
6312             nvme_activate_virt_res(n);
6313         }
6314     }
6315 
6316     n->aer_queued = 0;
6317     n->aer_mask = 0;
6318     n->outstanding_aers = 0;
6319     n->qs_created = false;
6320 
6321     nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
6322 
6323     if (pci_is_vf(pci_dev)) {
6324         sctrl = nvme_sctrl(n);
6325 
6326         stl_le_p(&n->bar.csts, sctrl->scs ? 0 : NVME_CSTS_FAILED);
6327     } else {
6328         stl_le_p(&n->bar.csts, 0);
6329     }
6330 
6331     stl_le_p(&n->bar.intms, 0);
6332     stl_le_p(&n->bar.intmc, 0);
6333     stl_le_p(&n->bar.cc, 0);
6334 
6335     n->dbbuf_dbs = 0;
6336     n->dbbuf_eis = 0;
6337     n->dbbuf_enabled = false;
6338 }
6339 
6340 static void nvme_ctrl_shutdown(NvmeCtrl *n)
6341 {
6342     NvmeNamespace *ns;
6343     int i;
6344 
6345     if (n->pmr.dev) {
6346         memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
6347     }
6348 
6349     for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
6350         ns = nvme_ns(n, i);
6351         if (!ns) {
6352             continue;
6353         }
6354 
6355         nvme_ns_shutdown(ns);
6356     }
6357 }
6358 
6359 static void nvme_select_iocs(NvmeCtrl *n)
6360 {
6361     NvmeNamespace *ns;
6362     int i;
6363 
6364     for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
6365         ns = nvme_ns(n, i);
6366         if (!ns) {
6367             continue;
6368         }
6369 
6370         nvme_select_iocs_ns(n, ns);
6371     }
6372 }
6373 
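/*
 * Controller enable (CC.EN 0 -> 1). Validates the admin queue addresses and
 * sizes, the selected command set, the memory page size and the queue entry
 * sizes against CAP before initializing the admin queue pair; a VF whose
 * secondary controller is offline refuses to start.
 */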
6374 static int nvme_start_ctrl(NvmeCtrl *n)
6375 {
6376     uint64_t cap = ldq_le_p(&n->bar.cap);
6377     uint32_t cc = ldl_le_p(&n->bar.cc);
6378     uint32_t aqa = ldl_le_p(&n->bar.aqa);
6379     uint64_t asq = ldq_le_p(&n->bar.asq);
6380     uint64_t acq = ldq_le_p(&n->bar.acq);
6381     uint32_t page_bits = NVME_CC_MPS(cc) + 12;
6382     uint32_t page_size = 1 << page_bits;
6383     NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
6384 
6385     if (pci_is_vf(PCI_DEVICE(n)) && !sctrl->scs) {
6386         trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi),
6387                                                 le16_to_cpu(sctrl->nvq),
6388                                                 sctrl->scs ? "ONLINE" :
6389                                                              "OFFLINE");
6390         return -1;
6391     }
6392     if (unlikely(n->cq[0])) {
6393         trace_pci_nvme_err_startfail_cq();
6394         return -1;
6395     }
6396     if (unlikely(n->sq[0])) {
6397         trace_pci_nvme_err_startfail_sq();
6398         return -1;
6399     }
6400     if (unlikely(asq & (page_size - 1))) {
6401         trace_pci_nvme_err_startfail_asq_misaligned(asq);
6402         return -1;
6403     }
6404     if (unlikely(acq & (page_size - 1))) {
6405         trace_pci_nvme_err_startfail_acq_misaligned(acq);
6406         return -1;
6407     }
6408     if (unlikely(!(NVME_CAP_CSS(cap) & (1 << NVME_CC_CSS(cc))))) {
6409         trace_pci_nvme_err_startfail_css(NVME_CC_CSS(cc));
6410         return -1;
6411     }
6412     if (unlikely(NVME_CC_MPS(cc) < NVME_CAP_MPSMIN(cap))) {
6413         trace_pci_nvme_err_startfail_page_too_small(
6414                     NVME_CC_MPS(cc),
6415                     NVME_CAP_MPSMIN(cap));
6416         return -1;
6417     }
6418     if (unlikely(NVME_CC_MPS(cc) >
6419                  NVME_CAP_MPSMAX(cap))) {
6420         trace_pci_nvme_err_startfail_page_too_large(
6421                     NVME_CC_MPS(cc),
6422                     NVME_CAP_MPSMAX(cap));
6423         return -1;
6424     }
6425     if (unlikely(NVME_CC_IOCQES(cc) <
6426                  NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
6427         trace_pci_nvme_err_startfail_cqent_too_small(
6428                     NVME_CC_IOCQES(cc),
6429                     NVME_CTRL_CQES_MIN(cap));
6430         return -1;
6431     }
6432     if (unlikely(NVME_CC_IOCQES(cc) >
6433                  NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
6434         trace_pci_nvme_err_startfail_cqent_too_large(
6435                     NVME_CC_IOCQES(cc),
6436                     NVME_CTRL_CQES_MAX(cap));
6437         return -1;
6438     }
6439     if (unlikely(NVME_CC_IOSQES(cc) <
6440                  NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
6441         trace_pci_nvme_err_startfail_sqent_too_small(
6442                     NVME_CC_IOSQES(cc),
6443                     NVME_CTRL_SQES_MIN(cap));
6444         return -1;
6445     }
6446     if (unlikely(NVME_CC_IOSQES(cc) >
6447                  NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
6448         trace_pci_nvme_err_startfail_sqent_too_large(
6449                     NVME_CC_IOSQES(cc),
6450                     NVME_CTRL_SQES_MAX(cap));
6451         return -1;
6452     }
6453     if (unlikely(!NVME_AQA_ASQS(aqa))) {
6454         trace_pci_nvme_err_startfail_asqent_sz_zero();
6455         return -1;
6456     }
6457     if (unlikely(!NVME_AQA_ACQS(aqa))) {
6458         trace_pci_nvme_err_startfail_acqent_sz_zero();
6459         return -1;
6460     }
6461 
6462     n->page_bits = page_bits;
6463     n->page_size = page_size;
6464     n->max_prp_ents = n->page_size / sizeof(uint64_t);
6465     n->cqe_size = 1 << NVME_CC_IOCQES(cc);
6466     n->sqe_size = 1 << NVME_CC_IOSQES(cc);
6467     nvme_init_cq(&n->admin_cq, n, acq, 0, 0, NVME_AQA_ACQS(aqa) + 1, 1);
6468     nvme_init_sq(&n->admin_sq, n, asq, 0, 0, NVME_AQA_ASQS(aqa) + 1);
6469 
6470     nvme_set_timestamp(n, 0ULL);
6471 
6472     nvme_select_iocs(n);
6473 
6474     return 0;
6475 }
6476 
6477 static void nvme_cmb_enable_regs(NvmeCtrl *n)
6478 {
6479     uint32_t cmbloc = ldl_le_p(&n->bar.cmbloc);
6480     uint32_t cmbsz = ldl_le_p(&n->bar.cmbsz);
6481 
6482     NVME_CMBLOC_SET_CDPCILS(cmbloc, 1);
6483     NVME_CMBLOC_SET_CDPMLS(cmbloc, 1);
6484     NVME_CMBLOC_SET_BIR(cmbloc, NVME_CMB_BIR);
6485     stl_le_p(&n->bar.cmbloc, cmbloc);
6486 
6487     NVME_CMBSZ_SET_SQS(cmbsz, 1);
6488     NVME_CMBSZ_SET_CQS(cmbsz, 0);
6489     NVME_CMBSZ_SET_LISTS(cmbsz, 1);
6490     NVME_CMBSZ_SET_RDS(cmbsz, 1);
6491     NVME_CMBSZ_SET_WDS(cmbsz, 1);
6492     NVME_CMBSZ_SET_SZU(cmbsz, 2); /* MBs */
6493     NVME_CMBSZ_SET_SZ(cmbsz, n->params.cmb_size_mb);
6494     stl_le_p(&n->bar.cmbsz, cmbsz);
6495 }
6496 
6497 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
6498                            unsigned size)
6499 {
6500     PCIDevice *pci = PCI_DEVICE(n);
6501     uint64_t cap = ldq_le_p(&n->bar.cap);
6502     uint32_t cc = ldl_le_p(&n->bar.cc);
6503     uint32_t intms = ldl_le_p(&n->bar.intms);
6504     uint32_t csts = ldl_le_p(&n->bar.csts);
6505     uint32_t pmrsts = ldl_le_p(&n->bar.pmrsts);
6506 
6507     if (unlikely(offset & (sizeof(uint32_t) - 1))) {
6508         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
6509                        "MMIO write not 32-bit aligned,"
6510                        " offset=0x%"PRIx64"", offset);
6511         /* should be ignored, fall through for now */
6512     }
6513 
6514     if (unlikely(size < sizeof(uint32_t))) {
6515         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
6516                        "MMIO write smaller than 32-bits,"
6517                        " offset=0x%"PRIx64", size=%u",
6518                        offset, size);
6519         /* should be ignored, fall through for now */
6520     }
6521 
6522     switch (offset) {
6523     case NVME_REG_INTMS:
6524         if (unlikely(msix_enabled(pci))) {
6525             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
6526                            "undefined access to interrupt mask set"
6527                            " when MSI-X is enabled");
6528             /* should be ignored, fall through for now */
6529         }
6530         intms |= data;
6531         stl_le_p(&n->bar.intms, intms);
6532         n->bar.intmc = n->bar.intms;
6533         trace_pci_nvme_mmio_intm_set(data & 0xffffffff, intms);
6534         nvme_irq_check(n);
6535         break;
6536     case NVME_REG_INTMC:
6537         if (unlikely(msix_enabled(pci))) {
6538             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
6539                            "undefined access to interrupt mask clr"
6540                            " when MSI-X is enabled");
6541             /* should be ignored, fall through for now */
6542         }
6543         intms &= ~data;
6544         stl_le_p(&n->bar.intms, intms);
6545         n->bar.intmc = n->bar.intms;
6546         trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, intms);
6547         nvme_irq_check(n);
6548         break;
6549     case NVME_REG_CC:
6550         stl_le_p(&n->bar.cc, data);
6551 
6552         trace_pci_nvme_mmio_cfg(data & 0xffffffff);
6553 
6554         if (NVME_CC_SHN(data) && !(NVME_CC_SHN(cc))) {
6555             trace_pci_nvme_mmio_shutdown_set();
6556             nvme_ctrl_shutdown(n);
6557             csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT);
6558             csts |= NVME_CSTS_SHST_COMPLETE;
6559         } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(cc)) {
6560             trace_pci_nvme_mmio_shutdown_cleared();
6561             csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT);
6562         }
6563 
6564         if (NVME_CC_EN(data) && !NVME_CC_EN(cc)) {
6565             if (unlikely(nvme_start_ctrl(n))) {
6566                 trace_pci_nvme_err_startfail();
6567                 csts = NVME_CSTS_FAILED;
6568             } else {
6569                 trace_pci_nvme_mmio_start_success();
6570                 csts = NVME_CSTS_READY;
6571             }
6572         } else if (!NVME_CC_EN(data) && NVME_CC_EN(cc)) {
6573             trace_pci_nvme_mmio_stopped();
6574             nvme_ctrl_reset(n, NVME_RESET_CONTROLLER);
6575 
6576             break;
6577         }
6578 
6579         stl_le_p(&n->bar.csts, csts);
6580 
6581         break;
6582     case NVME_REG_CSTS:
6583         if (data & (1 << 4)) {
6584             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
6585                            "attempted to W1C CSTS.NSSRO"
6586                            " but CAP.NSSRS is zero (not supported)");
6587         } else if (data != 0) {
6588             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
6589                            "attempted to set a read only bit"
6590                            " of controller status");
6591         }
6592         break;
6593     case NVME_REG_NSSR:
6594         if (data == 0x4e564d65) {
6595             trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
6596         } else {
6597             /* The spec says that writes of other values have no effect */
6598             return;
6599         }
6600         break;
6601     case NVME_REG_AQA:
6602         stl_le_p(&n->bar.aqa, data);
6603         trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
6604         break;
6605     case NVME_REG_ASQ:
6606         stn_le_p(&n->bar.asq, size, data);
6607         trace_pci_nvme_mmio_asqaddr(data);
6608         break;
6609     case NVME_REG_ASQ + 4:
6610         stl_le_p((uint8_t *)&n->bar.asq + 4, data);
6611         trace_pci_nvme_mmio_asqaddr_hi(data, ldq_le_p(&n->bar.asq));
6612         break;
6613     case NVME_REG_ACQ:
6614         trace_pci_nvme_mmio_acqaddr(data);
6615         stn_le_p(&n->bar.acq, size, data);
6616         break;
6617     case NVME_REG_ACQ + 4:
6618         stl_le_p((uint8_t *)&n->bar.acq + 4, data);
6619         trace_pci_nvme_mmio_acqaddr_hi(data, ldq_le_p(&n->bar.acq));
6620         break;
6621     case NVME_REG_CMBLOC:
6622         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
6623                        "invalid write to reserved CMBLOC"
6624                        " when CMBSZ is zero, ignored");
6625         return;
6626     case NVME_REG_CMBSZ:
6627         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
6628                        "invalid write to read only CMBSZ, ignored");
6629         return;
6630     case NVME_REG_CMBMSC:
6631         if (!NVME_CAP_CMBS(cap)) {
6632             return;
6633         }
6634 
6635         stn_le_p(&n->bar.cmbmsc, size, data);
6636         n->cmb.cmse = false;
6637 
6638         if (NVME_CMBMSC_CRE(data)) {
6639             nvme_cmb_enable_regs(n);
6640 
6641             if (NVME_CMBMSC_CMSE(data)) {
6642                 uint64_t cmbmsc = ldq_le_p(&n->bar.cmbmsc);
6643                 hwaddr cba = NVME_CMBMSC_CBA(cmbmsc) << CMBMSC_CBA_SHIFT;
6644                 if (cba + int128_get64(n->cmb.mem.size) < cba) {
6645                     uint32_t cmbsts = ldl_le_p(&n->bar.cmbsts);
6646                     NVME_CMBSTS_SET_CBAI(cmbsts, 1);
6647                     stl_le_p(&n->bar.cmbsts, cmbsts);
6648                     return;
6649                 }
6650 
6651                 n->cmb.cba = cba;
6652                 n->cmb.cmse = true;
6653             }
6654         } else {
6655             n->bar.cmbsz = 0;
6656             n->bar.cmbloc = 0;
6657         }
6658 
6659         return;
6660     case NVME_REG_CMBMSC + 4:
6661         stl_le_p((uint8_t *)&n->bar.cmbmsc + 4, data);
6662         return;
6663 
6664     case NVME_REG_PMRCAP:
6665         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
6666                        "invalid write to PMRCAP register, ignored");
6667         return;
6668     case NVME_REG_PMRCTL:
6669         if (!NVME_CAP_PMRS(cap)) {
6670             return;
6671         }
6672 
6673         stl_le_p(&n->bar.pmrctl, data);
6674         if (NVME_PMRCTL_EN(data)) {
6675             memory_region_set_enabled(&n->pmr.dev->mr, true);
6676             pmrsts = 0;
6677         } else {
6678             memory_region_set_enabled(&n->pmr.dev->mr, false);
6679             NVME_PMRSTS_SET_NRDY(pmrsts, 1);
6680             n->pmr.cmse = false;
6681         }
6682         stl_le_p(&n->bar.pmrsts, pmrsts);
6683         return;
6684     case NVME_REG_PMRSTS:
6685         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
6686                        "invalid write to PMRSTS register, ignored");
6687         return;
6688     case NVME_REG_PMREBS:
6689         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
6690                        "invalid write to PMREBS register, ignored");
6691         return;
6692     case NVME_REG_PMRSWTP:
6693         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
6694                        "invalid write to PMRSWTP register, ignored");
6695         return;
6696     case NVME_REG_PMRMSCL:
6697         if (!NVME_CAP_PMRS(cap)) {
6698             return;
6699         }
6700 
6701         stl_le_p(&n->bar.pmrmscl, data);
6702         n->pmr.cmse = false;
6703 
6704         if (NVME_PMRMSCL_CMSE(data)) {
6705             uint64_t pmrmscu = ldl_le_p(&n->bar.pmrmscu);
6706             hwaddr cba = pmrmscu << 32 |
6707                 (NVME_PMRMSCL_CBA(data) << PMRMSCL_CBA_SHIFT);
6708             if (cba + int128_get64(n->pmr.dev->mr.size) < cba) {
6709                 NVME_PMRSTS_SET_CBAI(pmrsts, 1);
6710                 stl_le_p(&n->bar.pmrsts, pmrsts);
6711                 return;
6712             }
6713 
6714             n->pmr.cmse = true;
6715             n->pmr.cba = cba;
6716         }
6717 
6718         return;
6719     case NVME_REG_PMRMSCU:
6720         if (!NVME_CAP_PMRS(cap)) {
6721             return;
6722         }
6723 
6724         stl_le_p(&n->bar.pmrmscu, data);
6725         return;
6726     default:
6727         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
6728                        "invalid MMIO write,"
6729                        " offset=0x%"PRIx64", data=%"PRIx64"",
6730                        offset, data);
6731         break;
6732     }
6733 }
6734 
6735 static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
6736 {
6737     NvmeCtrl *n = (NvmeCtrl *)opaque;
6738     uint8_t *ptr = (uint8_t *)&n->bar;
6739 
6740     trace_pci_nvme_mmio_read(addr, size);
6741 
6742     if (unlikely(addr & (sizeof(uint32_t) - 1))) {
6743         NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
6744                        "MMIO read not 32-bit aligned,"
6745                        " offset=0x%"PRIx64"", addr);
6746         /* should RAZ, fall through for now */
6747     } else if (unlikely(size < sizeof(uint32_t))) {
6748         NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
6749                        "MMIO read smaller than 32-bits,"
6750                        " offset=0x%"PRIx64"", addr);
6751         /* should RAZ, fall through for now */
6752     }
6753 
6754     if (addr > sizeof(n->bar) - size) {
6755         NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
6756                        "MMIO read beyond last register,"
6757                        " offset=0x%"PRIx64", returning 0", addr);
6758 
6759         return 0;
6760     }
6761 
6762     if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs &&
6763         addr != NVME_REG_CSTS) {
6764         trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size);
6765         return 0;
6766     }
6767 
6768     /*
6769      * When PMRWBM bit 1 is set, a read from PMRSTS
6770      * should ensure that prior writes made it to
6771      * persistent media.
6772      */
6773     if (addr == NVME_REG_PMRSTS &&
6774         (NVME_PMRCAP_PMRWBM(ldl_le_p(&n->bar.pmrcap)) & 0x02)) {
6775         memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
6776     }
6777 
6778     return ldn_le_p(ptr + addr, size);
6779 }
6780 
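/*
 * Doorbell writes. With CAP.DSTRD = 0 the doorbells are packed at 4-byte
 * stride from offset 1000h, so for queue id y the submission queue tail
 * doorbell lives at 1000h + (2 * y) * 4 and the completion queue head
 * doorbell at 1000h + (2 * y + 1) * 4; bit 2 of the offset therefore
 * distinguishes completion queue writes from submission queue writes.
 */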
6781 static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
6782 {
6783     PCIDevice *pci = PCI_DEVICE(n);
6784     uint32_t qid;
6785 
6786     if (unlikely(addr & ((1 << 2) - 1))) {
6787         NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
6788                        "doorbell write not 32-bit aligned,"
6789                        " offset=0x%"PRIx64", ignoring", addr);
6790         return;
6791     }
6792 
6793     if (((addr - 0x1000) >> 2) & 1) {
6794         /* Completion queue doorbell write */
6795 
6796         uint16_t new_head = val & 0xffff;
6797         int start_sqs;
6798         NvmeCQueue *cq;
6799 
6800         qid = (addr - (0x1000 + (1 << 2))) >> 3;
6801         if (unlikely(nvme_check_cqid(n, qid))) {
6802             NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
6803                            "completion queue doorbell write"
6804                            " for nonexistent queue,"
6805                            " sqid=%"PRIu32", ignoring", qid);
6806 
6807             /*
6808              * NVM Express v1.3d, Section 4.1 states: "If host software writes
6809              * an invalid value to the Submission Queue Tail Doorbell or
6810              * Completion Queue Head Doorbell register and an Asynchronous Event
6811              * Request command is outstanding, then an asynchronous event is
6812              * posted to the Admin Completion Queue with a status code of
6813              * Invalid Doorbell Write Value."
6814              *
6815              * Also note that the spec includes the "Invalid Doorbell Register"
6816              * status code, but nowhere does it specify when to use it.
6817              * However, it seems reasonable to use it here in a similar
6818              * fashion.
6819              */
6820             if (n->outstanding_aers) {
6821                 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
6822                                    NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
6823                                    NVME_LOG_ERROR_INFO);
6824             }
6825 
6826             return;
6827         }
6828 
6829         cq = n->cq[qid];
6830         if (unlikely(new_head >= cq->size)) {
6831             NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
6832                            "completion queue doorbell write value"
6833                            " beyond queue size, cqid=%"PRIu32","
6834                            " new_head=%"PRIu16", ignoring",
6835                            qid, new_head);
6836 
6837             if (n->outstanding_aers) {
6838                 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
6839                                    NVME_AER_INFO_ERR_INVALID_DB_VALUE,
6840                                    NVME_LOG_ERROR_INFO);
6841             }
6842 
6843             return;
6844         }
6845 
6846         trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);
6847 
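        /*
         * If the CQ was full, its submission queues may have stalled waiting
         * for a free completion entry; kick them now that the head update
         * frees up space.
         */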
6848         start_sqs = nvme_cq_full(cq) ? 1 : 0;
6849         cq->head = new_head;
6850         if (!qid && n->dbbuf_enabled) {
6851             pci_dma_write(pci, cq->db_addr, &cq->head, sizeof(cq->head));
6852         }
6853         if (start_sqs) {
6854             NvmeSQueue *sq;
6855             QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
6856                 qemu_bh_schedule(sq->bh);
6857             }
6858             qemu_bh_schedule(cq->bh);
6859         }
6860 
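        /*
         * head == tail means the host has consumed every posted completion,
         * so the interrupt for this CQ can be deasserted.
         */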
6861         if (cq->tail == cq->head) {
6862             if (cq->irq_enabled) {
6863                 n->cq_pending--;
6864             }
6865 
6866             nvme_irq_deassert(n, cq);
6867         }
6868     } else {
6869         /* Submission queue doorbell write */
6870 
6871         uint16_t new_tail = val & 0xffff;
6872         NvmeSQueue *sq;
6873 
6874         qid = (addr - 0x1000) >> 3;
6875         if (unlikely(nvme_check_sqid(n, qid))) {
6876             NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
6877                            "submission queue doorbell write"
6878                            " for nonexistent queue,"
6879                            " sqid=%"PRIu32", ignoring", qid);
6880 
6881             if (n->outstanding_aers) {
6882                 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
6883                                    NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
6884                                    NVME_LOG_ERROR_INFO);
6885             }
6886 
6887             return;
6888         }
6889 
6890         sq = n->sq[qid];
6891         if (unlikely(new_tail >= sq->size)) {
6892             NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
6893                            "submission queue doorbell write value"
6894                            " beyond queue size, sqid=%"PRIu32","
6895                            " new_tail=%"PRIu16", ignoring",
6896                            qid, new_tail);
6897 
6898             if (n->outstanding_aers) {
6899                 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
6900                                    NVME_AER_INFO_ERR_INVALID_DB_VALUE,
6901                                    NVME_LOG_ERROR_INFO);
6902             }
6903 
6904             return;
6905         }
6906 
6907         trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);
6908 
6909         sq->tail = new_tail;
6910         if (!qid && n->dbbuf_enabled) {
6911             /*
6912              * The spec states "the host shall also update the controller's
6913              * corresponding doorbell property to match the value of that entry
6914              * in the Shadow Doorbell buffer."
6915              *
6916              * Since this context is currently a VM trap, we can safely enforce
6917              * the requirement from the device side in case the host is
6918              * misbehaving.
6919              *
6920              * Note, we shouldn't have to do this, but various drivers,
6921              * including ones that run on Linux, are not updating Admin Queues,
6922              * so we can't trust reading it for an appropriate sq tail.
6923              */
6924             pci_dma_write(pci, sq->db_addr, &sq->tail, sizeof(sq->tail));
6925         }
6926 
6927         qemu_bh_schedule(sq->bh);
6928     }
6929 }
6930 
6931 static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
6932                             unsigned size)
6933 {
6934     NvmeCtrl *n = (NvmeCtrl *)opaque;
6935 
6936     trace_pci_nvme_mmio_write(addr, data, size);
6937 
6938     if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs &&
6939         addr != NVME_REG_CSTS) {
6940         trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size);
6941         return;
6942     }
6943 
6944     if (addr < sizeof(n->bar)) {
6945         nvme_write_bar(n, addr, data, size);
6946     } else {
6947         nvme_process_db(n, addr, data);
6948     }
6949 }
6950 
6951 static const MemoryRegionOps nvme_mmio_ops = {
6952     .read = nvme_mmio_read,
6953     .write = nvme_mmio_write,
6954     .endianness = DEVICE_LITTLE_ENDIAN,
6955     .impl = {
6956         .min_access_size = 2,
6957         .max_access_size = 8,
6958     },
6959 };
6960 
6961 static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
6962                            unsigned size)
6963 {
6964     NvmeCtrl *n = (NvmeCtrl *)opaque;
6965     stn_le_p(&n->cmb.buf[addr], size, data);
6966 }
6967 
6968 static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
6969 {
6970     NvmeCtrl *n = (NvmeCtrl *)opaque;
6971     return ldn_le_p(&n->cmb.buf[addr], size);
6972 }
6973 
6974 static const MemoryRegionOps nvme_cmb_ops = {
6975     .read = nvme_cmb_read,
6976     .write = nvme_cmb_write,
6977     .endianness = DEVICE_LITTLE_ENDIAN,
6978     .impl = {
6979         .min_access_size = 1,
6980         .max_access_size = 8,
6981     },
6982 };
6983 
6984 static bool nvme_check_params(NvmeCtrl *n, Error **errp)
6985 {
6986     NvmeParams *params = &n->params;
6987 
6988     if (params->num_queues) {
6989         warn_report("num_queues is deprecated; please use max_ioqpairs "
6990                     "instead");
6991 
6992         params->max_ioqpairs = params->num_queues - 1;
6993     }
6994 
6995     if (n->namespace.blkconf.blk && n->subsys) {
6996         error_setg(errp, "subsystem support is unavailable with legacy "
6997                    "namespace ('drive' property)");
6998         return false;
6999     }
7000 
7001     if (params->max_ioqpairs < 1 ||
7002         params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
7003         error_setg(errp, "max_ioqpairs must be between 1 and %d",
7004                    NVME_MAX_IOQPAIRS);
7005         return false;
7006     }
7007 
7008     if (params->msix_qsize < 1 ||
7009         params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
7010         error_setg(errp, "msix_qsize must be between 1 and %d",
7011                    PCI_MSIX_FLAGS_QSIZE + 1);
7012         return false;
7013     }
7014 
7015     if (!params->serial) {
7016         error_setg(errp, "serial property not set");
7017         return false;
7018     }
7019 
7020     if (n->pmr.dev) {
7021         if (host_memory_backend_is_mapped(n->pmr.dev)) {
7022             error_setg(errp, "can't use already busy memdev: %s",
7023                        object_get_canonical_path_component(OBJECT(n->pmr.dev)));
7024             return false;
7025         }
7026 
7027         if (!is_power_of_2(n->pmr.dev->size)) {
7028             error_setg(errp, "pmr backend size needs to be a power of 2");
7029             return false;
7030         }
7031 
7032         host_memory_backend_set_mapped(n->pmr.dev, true);
7033     }
7034 
7035     if (n->params.zasl > n->params.mdts) {
7036         error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less "
7037                    "than or equal to mdts (Maximum Data Transfer Size)");
7038         return false;
7039     }
7040 
7041     if (!n->params.vsl) {
7042         error_setg(errp, "vsl must be non-zero");
7043         return false;
7044     }
7045 
7046     if (params->sriov_max_vfs) {
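        /*
         * The flexible VQ/VI resources handed out to VFs are carved out of
         * max_ioqpairs and msix_qsize; the checks below ensure there are
         * enough flexible resources for every VF while the PF keeps at least
         * two private I/O queue pairs and one private interrupt vector.
         */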
7047         if (!n->subsys) {
7048             error_setg(errp, "subsystem is required for the use of SR-IOV");
7049             return false;
7050         }
7051 
7052         if (params->sriov_max_vfs > NVME_MAX_VFS) {
7053             error_setg(errp, "sriov_max_vfs must be between 0 and %d",
7054                        NVME_MAX_VFS);
7055             return false;
7056         }
7057 
7058         if (params->cmb_size_mb) {
7059             error_setg(errp, "CMB is not supported with SR-IOV");
7060             return false;
7061         }
7062 
7063         if (n->pmr.dev) {
7064             error_setg(errp, "PMR is not supported with SR-IOV");
7065             return false;
7066         }
7067 
7068         if (!params->sriov_vq_flexible || !params->sriov_vi_flexible) {
7069             error_setg(errp, "both sriov_vq_flexible and sriov_vi_flexible"
7070                        " must be set for the use of SR-IOV");
7071             return false;
7072         }
7073 
7074         if (params->sriov_vq_flexible < params->sriov_max_vfs * 2) {
7075             error_setg(errp, "sriov_vq_flexible must be greater than or equal"
7076                        " to %d (sriov_max_vfs * 2)", params->sriov_max_vfs * 2);
7077             return false;
7078         }
7079 
7080         if (params->max_ioqpairs < params->sriov_vq_flexible + 2) {
7081             error_setg(errp, "(max_ioqpairs - sriov_vq_flexible) must be"
7082                        " greater than or equal to 2");
7083             return false;
7084         }
7085 
7086         if (params->sriov_vi_flexible < params->sriov_max_vfs) {
7087             error_setg(errp, "sriov_vi_flexible must be greater than or equal"
7088                        " to %d (sriov_max_vfs)", params->sriov_max_vfs);
7089             return false;
7090         }
7091 
7092         if (params->msix_qsize < params->sriov_vi_flexible + 1) {
7093             error_setg(errp, "(msix_qsize - sriov_vi_flexible) must be"
7094                        " greater than or equal to 1");
7095             return false;
7096         }
7097 
7098         if (params->sriov_max_vi_per_vf &&
7099             (params->sriov_max_vi_per_vf - 1) % NVME_VF_RES_GRANULARITY) {
7100             error_setg(errp, "sriov_max_vi_per_vf must meet:"
7101                        " (sriov_max_vi_per_vf - 1) %% %d == 0 and"
7102                        " sriov_max_vi_per_vf >= 1", NVME_VF_RES_GRANULARITY);
7103             return false;
7104         }
7105 
7106         if (params->sriov_max_vq_per_vf &&
7107             (params->sriov_max_vq_per_vf < 2 ||
7108              (params->sriov_max_vq_per_vf - 1) % NVME_VF_RES_GRANULARITY)) {
7109             error_setg(errp, "sriov_max_vq_per_vf must meet:"
7110                        " (sriov_max_vq_per_vf - 1) %% %d == 0 and"
7111                        " sriov_max_vq_per_vf >= 2", NVME_VF_RES_GRANULARITY);
7112             return false;
7113         }
7114     }
7115 
7116     return true;
7117 }
7118 
7119 static void nvme_init_state(NvmeCtrl *n)
7120 {
7121     NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
7122     NvmeSecCtrlList *list = &n->sec_ctrl_list;
7123     NvmeSecCtrlEntry *sctrl;
7124     PCIDevice *pci = PCI_DEVICE(n);
7125     uint8_t max_vfs;
7126     int i;
7127 
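    /*
     * A VF gets its queue pair and interrupt vector counts from the
     * secondary controller entry assigned to it by the PF; the PF uses the
     * user-supplied parameters directly.
     */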
7128     if (pci_is_vf(pci)) {
7129         sctrl = nvme_sctrl(n);
7130         max_vfs = 0;
7131         n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0;
7132         n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1;
7133     } else {
7134         max_vfs = n->params.sriov_max_vfs;
7135         n->conf_ioqpairs = n->params.max_ioqpairs;
7136         n->conf_msix_qsize = n->params.msix_qsize;
7137     }
7138 
7139     n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
7140     n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
7141     n->temperature = NVME_TEMPERATURE;
7142     n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
7143     n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
7144     n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
7145     QTAILQ_INIT(&n->aer_queue);
7146 
7147     list->numcntl = cpu_to_le16(max_vfs);
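    /*
     * Pre-populate the secondary controller list: each potential VF is bound
     * to this primary controller and to its 1-based VF number.
     */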
7148     for (i = 0; i < max_vfs; i++) {
7149         sctrl = &list->sec[i];
7150         sctrl->pcid = cpu_to_le16(n->cntlid);
7151         sctrl->vfn = cpu_to_le16(i + 1);
7152     }
7153 
7154     cap->cntlid = cpu_to_le16(n->cntlid);
7155     cap->crt = NVME_CRT_VQ | NVME_CRT_VI;
7156 
7157     if (pci_is_vf(pci)) {
7158         cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs);
7159     } else {
7160         cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs -
7161                                  n->params.sriov_vq_flexible);
7162         cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible);
7163         cap->vqrfap = cap->vqfrt;
7164         cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY);
7165         cap->vqfrsm = n->params.sriov_max_vq_per_vf ?
7166                         cpu_to_le16(n->params.sriov_max_vq_per_vf) :
7167                         cap->vqfrt / MAX(max_vfs, 1);
7168     }
7169 
7170     if (pci_is_vf(pci)) {
7171         cap->viprt = cpu_to_le16(n->conf_msix_qsize);
7172     } else {
7173         cap->viprt = cpu_to_le16(n->params.msix_qsize -
7174                                  n->params.sriov_vi_flexible);
7175         cap->vifrt = cpu_to_le32(n->params.sriov_vi_flexible);
7176         cap->virfap = cap->vifrt;
7177         cap->vigran = cpu_to_le16(NVME_VF_RES_GRANULARITY);
7178         cap->vifrsm = n->params.sriov_max_vi_per_vf ?
7179                         cpu_to_le16(n->params.sriov_max_vi_per_vf) :
7180                         cap->vifrt / MAX(max_vfs, 1);
7181     }
7182 }
7183 
7184 static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
7185 {
7186     uint64_t cmb_size = n->params.cmb_size_mb * MiB;
7187     uint64_t cap = ldq_le_p(&n->bar.cap);
7188 
7189     n->cmb.buf = g_malloc0(cmb_size);
7190     memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n,
7191                           "nvme-cmb", cmb_size);
7192     pci_register_bar(pci_dev, NVME_CMB_BIR,
7193                      PCI_BASE_ADDRESS_SPACE_MEMORY |
7194                      PCI_BASE_ADDRESS_MEM_TYPE_64 |
7195                      PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem);
7196 
7197     NVME_CAP_SET_CMBS(cap, 1);
7198     stq_le_p(&n->bar.cap, cap);
7199 
7200     if (n->params.legacy_cmb) {
7201         nvme_cmb_enable_regs(n);
7202         n->cmb.cmse = true;
7203     }
7204 }
7205 
7206 static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
7207 {
7208     uint32_t pmrcap = ldl_le_p(&n->bar.pmrcap);
7209 
7210     NVME_PMRCAP_SET_RDS(pmrcap, 1);
7211     NVME_PMRCAP_SET_WDS(pmrcap, 1);
7212     NVME_PMRCAP_SET_BIR(pmrcap, NVME_PMR_BIR);
7213     /* PMRWBM bit 1: a read of PMRSTS ensures that prior PMR writes are persistent */
7214     NVME_PMRCAP_SET_PMRWBM(pmrcap, 0x02);
7215     NVME_PMRCAP_SET_CMSS(pmrcap, 1);
7216     stl_le_p(&n->bar.pmrcap, pmrcap);
7217 
7218     pci_register_bar(pci_dev, NVME_PMR_BIR,
7219                      PCI_BASE_ADDRESS_SPACE_MEMORY |
7220                      PCI_BASE_ADDRESS_MEM_TYPE_64 |
7221                      PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr);
7222 
7223     memory_region_set_enabled(&n->pmr.dev->mr, false);
7224 }
7225 
7226 static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs,
7227                               unsigned *msix_table_offset,
7228                               unsigned *msix_pba_offset)
7229 {
7230     uint64_t bar_size, msix_table_size, msix_pba_size;
7231 
7232     bar_size = sizeof(NvmeBar) + 2 * total_queues * NVME_DB_SIZE;
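    /*
     * BAR0 layout: the fixed NVMe register file followed by two doorbells
     * per queue, then the MSI-X table and the MSI-X PBA, each starting on a
     * 4 KiB boundary. The total is rounded up to a power of two, as required
     * for a PCI BAR.
     */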
7233     bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);
7234 
7235     if (msix_table_offset) {
7236         *msix_table_offset = bar_size;
7237     }
7238 
7239     msix_table_size = PCI_MSIX_ENTRY_SIZE * total_irqs;
7240     bar_size += msix_table_size;
7241     bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);
7242 
7243     if (msix_pba_offset) {
7244         *msix_pba_offset = bar_size;
7245     }
7246 
7247     msix_pba_size = QEMU_ALIGN_UP(total_irqs, 64) / 8;
7248     bar_size += msix_pba_size;
7249 
7250     bar_size = pow2ceil(bar_size);
7251     return bar_size;
7252 }
7253 
7254 static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset)
7255 {
7256     uint16_t vf_dev_id = n->params.use_intel_id ?
7257                          PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME;
7258     NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
7259     uint64_t bar_size = nvme_bar_size(le16_to_cpu(cap->vqfrsm),
7260                                       le16_to_cpu(cap->vifrsm),
7261                                       NULL, NULL);
7262 
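    /*
     * Each VF exposes a BAR0 large enough for the maximum per-VF queue and
     * interrupt resources (vqfrsm/vifrsm) computed in nvme_init_state().
     */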
7263     pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id,
7264                        n->params.sriov_max_vfs, n->params.sriov_max_vfs,
7265                        NVME_VF_OFFSET, NVME_VF_STRIDE);
7266 
7267     pcie_sriov_pf_init_vf_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
7268                               PCI_BASE_ADDRESS_MEM_TYPE_64, bar_size);
7269 }
7270 
7271 static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset)
7272 {
7273     Error *err = NULL;
7274     int ret;
7275 
7276     ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, offset,
7277                              PCI_PM_SIZEOF, &err);
7278     if (err) {
7279         error_report_err(err);
7280         return ret;
7281     }
7282 
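    /*
     * Advertise PM spec v1.2, indicate that no internal reset is performed
     * on a D3hot -> D0 transition, and let the guest write the power state
     * field.
     */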
7283     pci_set_word(pci_dev->config + offset + PCI_PM_PMC,
7284                  PCI_PM_CAP_VER_1_2);
7285     pci_set_word(pci_dev->config + offset + PCI_PM_CTRL,
7286                  PCI_PM_CTRL_NO_SOFT_RESET);
7287     pci_set_word(pci_dev->wmask + offset + PCI_PM_CTRL,
7288                  PCI_PM_CTRL_STATE_MASK);
7289 
7290     return 0;
7291 }
7292 
7293 static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
7294 {
7295     ERRP_GUARD();
7296     uint8_t *pci_conf = pci_dev->config;
7297     uint64_t bar_size;
7298     unsigned msix_table_offset, msix_pba_offset;
7299     int ret;
7300 
7301     pci_conf[PCI_INTERRUPT_PIN] = 1;
7302     pci_config_set_prog_interface(pci_conf, 0x2);
7303 
7304     if (n->params.use_intel_id) {
7305         pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
7306         pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_NVME);
7307     } else {
7308         pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
7309         pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
7310     }
7311 
7312     pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
7313     nvme_add_pm_capability(pci_dev, 0x60);
7314     pcie_endpoint_cap_init(pci_dev, 0x80);
7315     pcie_cap_flr_init(pci_dev);
7316     if (n->params.sriov_max_vfs) {
7317         pcie_ari_init(pci_dev, 0x100, 1);
7318     }
7319 
7320     /* add one to max_ioqpairs to account for the admin queue pair */
7321     bar_size = nvme_bar_size(n->params.max_ioqpairs + 1, n->params.msix_qsize,
7322                              &msix_table_offset, &msix_pba_offset);
7323 
7324     memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size);
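    /*
     * The register/doorbell I/O region only covers the start of BAR0; the
     * MSI-X table and PBA that follow are mapped into the BAR by msix_init()
     * below.
     */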
7325     memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
7326                           msix_table_offset);
7327     memory_region_add_subregion(&n->bar0, 0, &n->iomem);
7328 
7329     if (pci_is_vf(pci_dev)) {
7330         pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0);
7331     } else {
7332         pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
7333                          PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0);
7334     }
7335     ret = msix_init(pci_dev, n->params.msix_qsize,
7336                     &n->bar0, 0, msix_table_offset,
7337                     &n->bar0, 0, msix_pba_offset, 0, errp);
7338     if (ret == -ENOTSUP) {
7339         /* report that msix is not supported, but do not error out */
7340         warn_report_err(*errp);
7341         *errp = NULL;
7342     } else if (ret < 0) {
7343         /* propagate error to caller */
7344         return false;
7345     }
7346 
7347     nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
7348 
7349     if (n->params.cmb_size_mb) {
7350         nvme_init_cmb(n, pci_dev);
7351     }
7352 
7353     if (n->pmr.dev) {
7354         nvme_init_pmr(n, pci_dev);
7355     }
7356 
7357     if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) {
7358         nvme_init_sriov(n, pci_dev, 0x120);
7359     }
7360 
7361     return true;
7362 }
7363 
7364 static void nvme_init_subnqn(NvmeCtrl *n)
7365 {
7366     NvmeSubsystem *subsys = n->subsys;
7367     NvmeIdCtrl *id = &n->id_ctrl;
7368 
7369     if (!subsys) {
7370         snprintf((char *)id->subnqn, sizeof(id->subnqn),
7371                  "nqn.2019-08.org.qemu:%s", n->params.serial);
7372     } else {
7373         pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn);
7374     }
7375 }
7376 
7377 static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
7378 {
7379     NvmeIdCtrl *id = &n->id_ctrl;
7380     uint8_t *pci_conf = pci_dev->config;
7381     uint64_t cap = ldq_le_p(&n->bar.cap);
7382     NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
7383 
7384     id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
7385     id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
7386     strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
7387     strpadcpy((char *)id->fr, sizeof(id->fr), QEMU_VERSION, ' ');
7388     strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
7389 
7390     id->cntlid = cpu_to_le16(n->cntlid);
7391 
7392     id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);
7393     id->ctratt |= cpu_to_le32(NVME_CTRATT_ELBAS);
7394 
7395     id->rab = 6;
7396 
7397     if (n->params.use_intel_id) {
7398         id->ieee[0] = 0xb3;
7399         id->ieee[1] = 0x02;
7400         id->ieee[2] = 0x00;
7401     } else {
7402         id->ieee[0] = 0x00;
7403         id->ieee[1] = 0x54;
7404         id->ieee[2] = 0x52;
7405     }
7406 
7407     id->mdts = n->params.mdts;
7408     id->ver = cpu_to_le32(NVME_SPEC_VER);
7409     id->oacs =
7410         cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF);
7411     id->cntrltype = 0x1;
7412 
7413     /*
7414      * Because the controller always completes the Abort command immediately,
7415      * there can never be more than one concurrently executing Abort command,
7416      * so this value is never used for anything. Note that there can easily be
7417      * many Abort commands in the queues, but they are not considered
7418      * "executing" until processed by nvme_abort.
7419      *
7420      * The specification recommends a value of 3 for Abort Command Limit (four
7421      * concurrently outstanding Abort commands), so let's use that, though it is
7422      * inconsequential.
7423      */
7424     id->acl = 3;
7425     id->aerl = n->params.aerl;
7426     id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
7427     id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED;
7428 
7429     /* recommended default value (~70 C) */
7430     id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
7431     id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);
7432 
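    /*
     * Both the required and maximum entry sizes: 64 byte (2^6) submission
     * queue entries and 16 byte (2^4) completion queue entries.
     */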
7433     id->sqes = (0x6 << 4) | 0x6;
7434     id->cqes = (0x4 << 4) | 0x4;
7435     id->nn = cpu_to_le32(NVME_MAX_NAMESPACES);
7436     id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
7437                            NVME_ONCS_FEATURES | NVME_ONCS_DSM |
7438                            NVME_ONCS_COMPARE | NVME_ONCS_COPY);
7439 
7440     /*
7441      * NOTE: If this device ever supports a command set that does NOT use 0x0
7442      * as a Flush-equivalent operation, support for the broadcast NSID in Flush
7443      * should probably be removed.
7444      *
7445      * See comment in nvme_io_cmd.
7446      */
7447     id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;
7448 
7449     id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1);
7450     id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN);
7451 
7452     nvme_init_subnqn(n);
7453 
7454     id->psd[0].mp = cpu_to_le16(0x9c4);
7455     id->psd[0].enlat = cpu_to_le32(0x10);
7456     id->psd[0].exlat = cpu_to_le32(0x4);
7457 
7458     if (n->subsys) {
7459         id->cmic |= NVME_CMIC_MULTI_CTRL;
7460     }
7461 
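    /* CAP.MQES is zero-based; 0x7ff advertises queues of up to 2048 entries */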
7462     NVME_CAP_SET_MQES(cap, 0x7ff);
7463     NVME_CAP_SET_CQR(cap, 1);
7464     NVME_CAP_SET_TO(cap, 0xf);
7465     NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM);
7466     NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_CSI_SUPP);
7467     NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_ADMIN_ONLY);
7468     NVME_CAP_SET_MPSMAX(cap, 4);
7469     NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0);
7470     NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0);
7471     stq_le_p(&n->bar.cap, cap);
7472 
7473     stl_le_p(&n->bar.vs, NVME_SPEC_VER);
7474     n->bar.intmc = n->bar.intms = 0;
7475 
7476     if (pci_is_vf(pci_dev) && !sctrl->scs) {
7477         stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
7478     }
7479 }
7480 
7481 static int nvme_init_subsys(NvmeCtrl *n, Error **errp)
7482 {
7483     int cntlid;
7484 
7485     if (!n->subsys) {
7486         return 0;
7487     }
7488 
7489     cntlid = nvme_subsys_register_ctrl(n, errp);
7490     if (cntlid < 0) {
7491         return -1;
7492     }
7493 
7494     n->cntlid = cntlid;
7495 
7496     return 0;
7497 }
7498 
7499 void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns)
7500 {
7501     uint32_t nsid = ns->params.nsid;
7502     assert(nsid && nsid <= NVME_MAX_NAMESPACES);
7503 
7504     n->namespaces[nsid] = ns;
7505     ns->attached++;
7506 
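    /*
     * Track the smallest limit across attached namespaces so that a single
     * Dataset Management range never exceeds what the block layer accepts in
     * one request.
     */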
7507     n->dmrsl = MIN_NON_ZERO(n->dmrsl,
7508                             BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
7509 }
7510 
7511 static void nvme_realize(PCIDevice *pci_dev, Error **errp)
7512 {
7513     NvmeCtrl *n = NVME(pci_dev);
7514     DeviceState *dev = DEVICE(pci_dev);
7515     NvmeNamespace *ns;
7516     NvmeCtrl *pn = NVME(pcie_sriov_get_pf(pci_dev));
7517 
7518     if (pci_is_vf(pci_dev)) {
7519         /*
7520          * VFs derive settings from the parent. The PF's lifespan exceeds
7521          * that of its VFs, so it's safe to share params.serial.
7522          */
7523         memcpy(&n->params, &pn->params, sizeof(NvmeParams));
7524         n->subsys = pn->subsys;
7525     }
7526 
7527     if (!nvme_check_params(n, errp)) {
7528         return;
7529     }
7530 
7531     qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id);
7532 
7533     if (nvme_init_subsys(n, errp)) {
7534         return;
7535     }
7536     nvme_init_state(n);
7537     if (!nvme_init_pci(n, pci_dev, errp)) {
7538         return;
7539     }
7540     nvme_init_ctrl(n, pci_dev);
7541 
7542     /* set up a namespace if the controller drive property was given */
7543     if (n->namespace.blkconf.blk) {
7544         ns = &n->namespace;
7545         ns->params.nsid = 1;
7546 
7547         if (nvme_ns_setup(ns, errp)) {
7548             return;
7549         }
7550 
7551         nvme_attach_ns(n, ns);
7552     }
7553 }
7554 
7555 static void nvme_exit(PCIDevice *pci_dev)
7556 {
7557     NvmeCtrl *n = NVME(pci_dev);
7558     NvmeNamespace *ns;
7559     int i;
7560 
7561     nvme_ctrl_reset(n, NVME_RESET_FUNCTION);
7562 
7563     if (n->subsys) {
7564         for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
7565             ns = nvme_ns(n, i);
7566             if (ns) {
7567                 ns->attached--;
7568             }
7569         }
7570 
7571         nvme_subsys_unregister_ctrl(n->subsys, n);
7572     }
7573 
7574     g_free(n->cq);
7575     g_free(n->sq);
7576     g_free(n->aer_reqs);
7577 
7578     if (n->params.cmb_size_mb) {
7579         g_free(n->cmb.buf);
7580     }
7581 
7582     if (n->pmr.dev) {
7583         host_memory_backend_set_mapped(n->pmr.dev, false);
7584     }
7585 
7586     if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) {
7587         pcie_sriov_pf_exit(pci_dev);
7588     }
7589 
7590     msix_uninit(pci_dev, &n->bar0, &n->bar0);
7591     memory_region_del_subregion(&n->bar0, &n->iomem);
7592 }
7593 
7594 static Property nvme_props[] = {
7595     DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
7596     DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
7597                      HostMemoryBackend *),
7598     DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS,
7599                      NvmeSubsystem *),
7600     DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
7601     DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
7602     DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
7603     DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
7604     DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
7605     DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
7606     DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
7607     DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
7608     DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7),
7609     DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
7610     DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
7611     DEFINE_PROP_BOOL("ioeventfd", NvmeCtrl, params.ioeventfd, false),
7612     DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0),
7613     DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl,
7614                      params.auto_transition_zones, true),
7615     DEFINE_PROP_UINT8("sriov_max_vfs", NvmeCtrl, params.sriov_max_vfs, 0),
7616     DEFINE_PROP_UINT16("sriov_vq_flexible", NvmeCtrl,
7617                        params.sriov_vq_flexible, 0),
7618     DEFINE_PROP_UINT16("sriov_vi_flexible", NvmeCtrl,
7619                        params.sriov_vi_flexible, 0),
7620     DEFINE_PROP_UINT8("sriov_max_vi_per_vf", NvmeCtrl,
7621                       params.sriov_max_vi_per_vf, 0),
7622     DEFINE_PROP_UINT8("sriov_max_vq_per_vf", NvmeCtrl,
7623                       params.sriov_max_vq_per_vf, 0),
7624     DEFINE_PROP_END_OF_LIST(),
7625 };
7626 
7627 static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name,
7628                                    void *opaque, Error **errp)
7629 {
7630     NvmeCtrl *n = NVME(obj);
7631     uint8_t value = n->smart_critical_warning;
7632 
7633     visit_type_uint8(v, name, &value, errp);
7634 }
7635 
7636 static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name,
7637                                    void *opaque, Error **errp)
7638 {
7639     NvmeCtrl *n = NVME(obj);
7640     uint8_t value, old_value, cap = 0, index, event;
7641 
7642     if (!visit_type_uint8(v, name, &value, errp)) {
7643         return;
7644     }
7645 
7646     cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY
7647           | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA;
7648     if (NVME_CAP_PMRS(ldq_le_p(&n->bar.cap))) {
7649         cap |= NVME_SMART_PMR_UNRELIABLE;
7650     }
7651 
7652     if ((value & cap) != value) {
7653         error_setg(errp, "unsupported smart critical warning bits: 0x%x",
7654                    value & ~cap);
7655         return;
7656     }
7657 
7658     old_value = n->smart_critical_warning;
7659     n->smart_critical_warning = value;
7660 
7661     /* only inject new bits of smart critical warning */
7662     for (index = 0; index < NVME_SMART_WARN_MAX; index++) {
7663         event = 1 << index;
7664         if (value & ~old_value & event) {
7665             nvme_smart_event(n, event);
             }
7666     }
7667 }
7668 
7669 static void nvme_pci_reset(DeviceState *qdev)
7670 {
7671     PCIDevice *pci_dev = PCI_DEVICE(qdev);
7672     NvmeCtrl *n = NVME(pci_dev);
7673 
7674     trace_pci_nvme_pci_reset();
7675     nvme_ctrl_reset(n, NVME_RESET_FUNCTION);
7676 }
7677 
7678 static void nvme_sriov_pre_write_ctrl(PCIDevice *dev, uint32_t address,
7679                                       uint32_t val, int len)
7680 {
7681     NvmeCtrl *n = NVME(dev);
7682     NvmeSecCtrlEntry *sctrl;
7683     uint16_t sriov_cap = dev->exp.sriov_cap;
7684     uint32_t off = address - sriov_cap;
7685     int i, num_vfs;
7686 
7687     if (!sriov_cap) {
7688         return;
7689     }
7690 
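    /*
     * If the write clears VF Enable, the VFs are about to disappear; take
     * every secondary controller offline before the config write lands.
     */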
7691     if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) {
7692         if (!(val & PCI_SRIOV_CTRL_VFE)) {
7693             num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF);
7694             for (i = 0; i < num_vfs; i++) {
7695                 sctrl = &n->sec_ctrl_list.sec[i];
7696                 nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false);
7697             }
7698         }
7699     }
7700 }
7701 
7702 static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
7703                                   uint32_t val, int len)
7704 {
7705     nvme_sriov_pre_write_ctrl(dev, address, val, len);
7706     pci_default_write_config(dev, address, val, len);
7707     pcie_cap_flr_write_config(dev, address, val, len);
7708 }
7709 
7710 static const VMStateDescription nvme_vmstate = {
7711     .name = "nvme",
7712     .unmigratable = 1,
7713 };
7714 
7715 static void nvme_class_init(ObjectClass *oc, void *data)
7716 {
7717     DeviceClass *dc = DEVICE_CLASS(oc);
7718     PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
7719 
7720     pc->realize = nvme_realize;
7721     pc->config_write = nvme_pci_write_config;
7722     pc->exit = nvme_exit;
7723     pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
7724     pc->revision = 2;
7725 
7726     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
7727     dc->desc = "Non-Volatile Memory Express";
7728     device_class_set_props(dc, nvme_props);
7729     dc->vmsd = &nvme_vmstate;
7730     dc->reset = nvme_pci_reset;
7731 }
7732 
7733 static void nvme_instance_init(Object *obj)
7734 {
7735     NvmeCtrl *n = NVME(obj);
7736 
7737     device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex,
7738                                   "bootindex", "/namespace@1,0",
7739                                   DEVICE(obj));
7740 
7741     object_property_add(obj, "smart_critical_warning", "uint8",
7742                         nvme_get_smart_warning,
7743                         nvme_set_smart_warning, NULL, NULL);
7744 }
7745 
7746 static const TypeInfo nvme_info = {
7747     .name          = TYPE_NVME,
7748     .parent        = TYPE_PCI_DEVICE,
7749     .instance_size = sizeof(NvmeCtrl),
7750     .instance_init = nvme_instance_init,
7751     .class_init    = nvme_class_init,
7752     .interfaces = (InterfaceInfo[]) {
7753         { INTERFACE_PCIE_DEVICE },
7754         { }
7755     },
7756 };
7757 
7758 static const TypeInfo nvme_bus_info = {
7759     .name = TYPE_NVME_BUS,
7760     .parent = TYPE_BUS,
7761     .instance_size = sizeof(NvmeBus),
7762 };
7763 
7764 static void nvme_register_types(void)
7765 {
7766     type_register_static(&nvme_info);
7767     type_register_static(&nvme_bus_info);
7768 }
7769 
7770 type_init(nvme_register_types)
7771