xref: /openbmc/qemu/hw/nvme/nvme.h (revision 89f7d4fb13d9ee2263c98b300b67e06accff89e2)
1 /*
2  * QEMU NVM Express
3  *
4  * Copyright (c) 2012 Intel Corporation
5  * Copyright (c) 2021 Minwoo Im
6  * Copyright (c) 2021 Samsung Electronics Co., Ltd.
7  *
8  * Authors:
9  *   Keith Busch            <kbusch@kernel.org>
10  *   Klaus Jensen           <k.jensen@samsung.com>
11  *   Gollu Appalanaidu      <anaidu.gollu@samsung.com>
12  *   Dmitry Fomichev        <dmitry.fomichev@wdc.com>
13  *   Minwoo Im              <minwoo.im.dev@gmail.com>
14  *
15  * This code is licensed under the GNU GPL v2 or later.
16  */
17 
18 #ifndef HW_NVME_NVME_H
19 #define HW_NVME_NVME_H
20 
21 #include "qemu/uuid.h"
22 #include "hw/pci/pci_device.h"
23 #include "hw/block/block.h"
24 
25 #include "block/nvme.h"
26 
27 #define NVME_MAX_CONTROLLERS 256
28 #define NVME_MAX_NAMESPACES  256
29 #define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000)
30 #define NVME_FDP_MAX_EVENTS 63
31 #define NVME_FDP_MAXPIDS 128
32 
33 /*
34  * The controller only supports Submission and Completion Queue Entry Sizes of
35  * 64 and 16 bytes respectively.
36  */
37 #define NVME_SQES 6
38 #define NVME_CQES 4
39 
40 QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1);
41 
42 typedef struct NvmeCtrl NvmeCtrl;
43 typedef struct NvmeNamespace NvmeNamespace;
44 
45 #define TYPE_NVME_BUS "nvme-bus"
46 OBJECT_DECLARE_SIMPLE_TYPE(NvmeBus, NVME_BUS)
47 
/* Bus that namespace devices attach to (embedded in subsystem and ctrl). */
typedef struct NvmeBus {
    BusState parent_bus;
} NvmeBus;
51 
52 #define TYPE_NVME_SUBSYS "nvme-subsys"
53 #define NVME_SUBSYS(obj) \
54     OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
55 #define SUBSYS_SLOT_RSVD (void *)0xFFFF
56 
/* Per-reclaim-unit state (NVMe Flexible Data Placement). */
typedef struct NvmeReclaimUnit {
    uint64_t ruamw; /* Reclaim Unit Available Media Writes */
} NvmeReclaimUnit;
60 
/* Reclaim Unit Handle (NVMe FDP) and its per-reclaim-group units. */
typedef struct NvmeRuHandle {
    uint8_t  ruht;          /* reclaim unit handle type */
    uint8_t  ruha;          /* reclaim unit handle attributes */
    uint64_t event_filter;  /* bitmask of FDP events to record (see
                             * nvme_fdp_evf_shifts for bit positions) */
    uint8_t  lbafi;         /* LBA format index */
    uint64_t ruamw;
    /* reclaim units indexed by reclaim group */
    NvmeReclaimUnit *rus;
} NvmeRuHandle;
71 
/*
 * Fixed-capacity ring buffer of FDP events.  NOTE(review): from the field
 * names, `start` appears to index the oldest event, `next` the insertion
 * slot, and `nelems` the occupancy -- confirm against the log page code.
 */
typedef struct NvmeFdpEventBuffer {
    NvmeFdpEvent     events[NVME_FDP_MAX_EVENTS];
    unsigned int     nelems;
    unsigned int     start;
    unsigned int     next;
} NvmeFdpEventBuffer;
78 
/* Endurance group state; one per subsystem (see NvmeSubsystem.endgrp). */
typedef struct NvmeEnduranceGroup {
    uint8_t event_conf;

    /* Flexible Data Placement state for this endurance group */
    struct {
        NvmeFdpEventBuffer host_events, ctrl_events;

        uint16_t nruh;      /* number of reclaim unit handles */
        uint16_t nrg;       /* number of reclaim groups */
        uint8_t  rgif;      /* reclaim group identifier format */
        uint64_t runs;      /* reclaim unit nominal size */

        uint64_t hbmw;      /* host bytes with metadata written */
        uint64_t mbmw;      /* media bytes with metadata written */
        uint64_t mbe;       /* media bytes erased */

        bool enabled;

        NvmeRuHandle *ruhs; /* array of nruh reclaim unit handles */
    } fdp;
} NvmeEnduranceGroup;
99 
/*
 * An NVM subsystem: groups controllers and namespaces under a common
 * NQN/serial and owns the endurance group state.
 */
typedef struct NvmeSubsystem {
    DeviceState parent_obj;
    NvmeBus     bus;
    uint8_t     subnqn[256];    /* NVM Subsystem Qualified Name */
    char        *serial;

    /* slots may hold SUBSYS_SLOT_RSVD to mark reserved controller ids */
    NvmeCtrl           *ctrls[NVME_MAX_CONTROLLERS];
    /* indexed by NSID; NSIDs start at 1, so slot 0 is unused */
    NvmeNamespace      *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeEnduranceGroup endgrp;

    struct {
        char *nqn;

        struct {
            bool     enabled;
            uint64_t runs;
            uint16_t nruh;
            uint32_t nrg;
        } fdp;
    } params;
} NvmeSubsystem;
121 
122 int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
123 void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n);
124 
nvme_subsys_ctrl(NvmeSubsystem * subsys,uint32_t cntlid)125 static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
126                                          uint32_t cntlid)
127 {
128     if (!subsys || cntlid >= NVME_MAX_CONTROLLERS) {
129         return NULL;
130     }
131 
132     if (subsys->ctrls[cntlid] == SUBSYS_SLOT_RSVD) {
133         return NULL;
134     }
135 
136     return subsys->ctrls[cntlid];
137 }
138 
nvme_subsys_ns(NvmeSubsystem * subsys,uint32_t nsid)139 static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys,
140                                             uint32_t nsid)
141 {
142     if (!subsys || !nsid || nsid > NVME_MAX_NAMESPACES) {
143         return NULL;
144     }
145 
146     return subsys->namespaces[nsid];
147 }
148 
149 #define TYPE_NVME_NS "nvme-ns"
150 #define NVME_NS(obj) \
151     OBJECT_CHECK(NvmeNamespace, (obj), TYPE_NVME_NS)
152 
/* Runtime state of a single zone (Zoned Namespace command set). */
typedef struct NvmeZone {
    NvmeZoneDescr   d;      /* zone descriptor (zslba, zcap, zs, ...) */
    uint64_t        w_ptr;  /* write pointer */
    QTAILQ_ENTRY(NvmeZone) entry;   /* linkage on a per-state zone list */
} NvmeZone;
158 
159 #define FDP_EVT_MAX 0xff
160 #define NVME_FDP_MAX_NS_RUHS 32u
161 #define FDPVSS 0
162 
/*
 * Bit position of each FDP event type within a reclaim unit handle's
 * 64-bit event filter; host-initiated events occupy the low dword,
 * controller-initiated events the high dword.
 */
static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = {
    /* Host events */
    [FDP_EVT_RU_NOT_FULLY_WRITTEN]      = 0,
    [FDP_EVT_RU_ATL_EXCEEDED]           = 1,
    [FDP_EVT_CTRL_RESET_RUH]            = 2,
    [FDP_EVT_INVALID_PID]               = 3,
    /* CTRL events */
    [FDP_EVT_MEDIA_REALLOC]             = 32,
    [FDP_EVT_RUH_IMPLICIT_RU_CHANGE]    = 33,
};
173 
174 #define NGUID_LEN 16
175 
/* 16-byte Namespace Globally Unique Identifier (NGUID). */
typedef struct {
    uint8_t data[NGUID_LEN];
} NvmeNGUID;
179 
180 bool nvme_nguid_is_null(const NvmeNGUID *nguid);
181 
182 extern const PropertyInfo qdev_prop_nguid;
183 
184 #define DEFINE_PROP_NGUID_NODEFAULT(_name, _state, _field) \
185     DEFINE_PROP(_name, _state, _field, qdev_prop_nguid, NvmeNGUID)
186 
/* User-configurable parameters of a namespace device. */
typedef struct NvmeNamespaceParams {
    bool      detached;
    bool      shared;
    uint32_t  nsid;
    QemuUUID  uuid;
    NvmeNGUID nguid;
    uint64_t  eui64;
    bool      eui64_default;    /* see NVME_EUI64_DEFAULT */

    /* metadata size and end-to-end protection information settings */
    uint16_t ms;
    uint8_t  mset;
    uint8_t  pi;
    uint8_t  pil;
    uint8_t  pif;

    /* Copy command limits */
    uint16_t mssrl;
    uint32_t mcl;
    uint8_t  msrc;

    /* Zoned Namespace configuration */
    bool     zoned;
    bool     cross_zone_read;
    uint64_t zone_size_bs;
    uint64_t zone_cap_bs;
    uint32_t max_active_zones;
    uint32_t max_open_zones;
    uint32_t zd_extension_size;

    /* Zone Random Write Area configuration */
    uint32_t numzrwa;
    uint64_t zrwas;
    uint64_t zrwafg;

    struct {
        /* reclaim unit handle assignment; string format set by the
         * ns-fdp parsing code -- see hw/nvme/ns.c */
        char *ruhs;
    } fdp;

    /* per-namespace atomic write parameters */
    struct {
        uint16_t nawun;
        uint16_t nawupf;
        uint16_t nabsn;
        uint16_t nabspf;
        uint16_t nabo;
    } atomic;
} NvmeNamespaceParams;
230 
/*
 * Derived atomic-write limits; populated by
 * nvme_atomic_configure_max_write_size() and
 * nvme_ns_atomic_configure_boundary().
 */
typedef struct NvmeAtomic {
    uint32_t    atomic_max_write_size;
    uint64_t    atomic_boundary;
    uint64_t    atomic_nabo;
    bool        atomic_writes;
} NvmeAtomic;
237 
/* Runtime state of a namespace (one block device). */
typedef struct NvmeNamespace {
    DeviceState  parent_obj;
    BlockConf    blkconf;
    int32_t      bootindex;
    char         bootindex_suffix[24];
    int64_t      size;
    int64_t      moff;          /* byte offset of the metadata area;
                                 * see nvme_moff() */
    NvmeIdNs     id_ns;
    NvmeIdNsNvm  id_ns_nvm;
    NvmeIdNsInd  id_ns_ind;
    NvmeLBAF     lbaf;          /* active LBA format (ds/ms used by
                                 * nvme_l2b()/nvme_m2b()) */
    unsigned int nlbaf;
    size_t       lbasz;
    uint8_t      csi;           /* command set identifier */
    uint16_t     status;
    int          attached;      /* NOTE(review): looks like an attach
                                 * refcount -- confirm in nvme_attach_ns() */
    uint8_t      pif;

    /* Zone Random Write Area state */
    struct {
        uint16_t zrwas;
        uint16_t zrwafg;
        uint32_t numzrwa;
    } zns;

    QTAILQ_ENTRY(NvmeNamespace) entry;

    /* Zoned Namespace state; zones are kept on per-state lists */
    NvmeIdNsZoned   *id_ns_zoned;
    NvmeZone        *zone_array;
    QTAILQ_HEAD(, NvmeZone) exp_open_zones;
    QTAILQ_HEAD(, NvmeZone) imp_open_zones;
    QTAILQ_HEAD(, NvmeZone) closed_zones;
    QTAILQ_HEAD(, NvmeZone) full_zones;
    uint32_t        num_zones;
    uint64_t        zone_size;
    uint64_t        zone_capacity;
    uint32_t        zone_size_log2;
    uint8_t         *zd_extensions; /* flat array; see nvme_get_zd_extension() */
    int32_t         nr_open_zones;   /* tracked by nvme_aor_{inc,dec}_open() */
    int32_t         nr_active_zones; /* tracked by nvme_aor_{inc,dec}_active() */

    NvmeNamespaceParams params;
    NvmeSubsystem *subsys;
    NvmeEnduranceGroup *endgrp;

    /* NULL for shared namespaces; set to specific controller if private */
    NvmeCtrl *ctrl;

    struct {
        uint32_t err_rec;
    } features;

    struct {
        uint16_t nphs;
        /* reclaim unit handle identifiers indexed by placement handle */
        uint16_t *phs;
    } fdp;

    NvmeAtomic  atomic;
} NvmeNamespace;
297 
/*
 * Return the configured NSID of @ns; 0 (never a valid NSID) when @ns is
 * NULL.
 */
static inline uint32_t nvme_nsid(NvmeNamespace *ns)
{
    return ns ? ns->params.nsid : 0;
}
306 
/* Convert a count/address in logical blocks to bytes. */
static inline size_t nvme_l2b(NvmeNamespace *ns, uint64_t lba)
{
    uint64_t bytes = lba << ns->lbaf.ds;

    return bytes;
}
311 
/* Metadata bytes corresponding to @lba logical blocks. */
static inline size_t nvme_m2b(NvmeNamespace *ns, uint64_t lba)
{
    return lba * ns->lbaf.ms;
}
316 
/* Byte offset of the metadata belonging to logical block @lba. */
static inline int64_t nvme_moff(NvmeNamespace *ns, uint64_t lba)
{
    int64_t base = ns->moff;

    return base + nvme_m2b(ns, lba);
}
321 
/* True when the FLBAS "extended data LBA" bit is set for this namespace. */
static inline bool nvme_ns_ext(NvmeNamespace *ns)
{
    return NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas) != 0;
}
326 
nvme_get_zone_state(NvmeZone * zone)327 static inline NvmeZoneState nvme_get_zone_state(NvmeZone *zone)
328 {
329     return zone->d.zs >> 4;
330 }
331 
/* Store @state into the upper nibble of the ZS descriptor byte. */
static inline void nvme_set_zone_state(NvmeZone *zone, NvmeZoneState state)
{
    zone->d.zs = state << 4;
}
336 
/* First LBA past the zone for read purposes (full zone size). */
static inline uint64_t nvme_zone_rd_boundary(NvmeNamespace *ns, NvmeZone *zone)
{
    uint64_t zslba = zone->d.zslba;

    return zslba + ns->zone_size;
}
341 
/* First LBA past the writable capacity of the zone. */
static inline uint64_t nvme_zone_wr_boundary(NvmeZone *zone)
{
    return zone->d.zcap + zone->d.zslba;
}
346 
/*
 * Whether the zone's write pointer is meaningful; it is not in the
 * full, read-only and offline states.
 */
static inline bool nvme_wp_is_valid(NvmeZone *zone)
{
    switch (nvme_get_zone_state(zone)) {
    case NVME_ZONE_STATE_FULL:
    case NVME_ZONE_STATE_READ_ONLY:
    case NVME_ZONE_STATE_OFFLINE:
        return false;
    default:
        return true;
    }
}
355 
/* Zone descriptor extension for zone @zone_idx (flat array of
 * zd_extension_size-byte entries). */
static inline uint8_t *nvme_get_zd_extension(NvmeNamespace *ns,
                                             uint32_t zone_idx)
{
    uint32_t off = zone_idx * ns->params.zd_extension_size;

    return ns->zd_extensions + off;
}
361 
/* Account one more open zone; only tracked when a limit is configured. */
static inline void nvme_aor_inc_open(NvmeNamespace *ns)
{
    assert(ns->nr_open_zones >= 0);

    if (!ns->params.max_open_zones) {
        return;
    }

    ns->nr_open_zones++;
    assert(ns->nr_open_zones <= ns->params.max_open_zones);
}
370 
/* Account one fewer open zone; only tracked when a limit is configured. */
static inline void nvme_aor_dec_open(NvmeNamespace *ns)
{
    if (ns->params.max_open_zones) {
        assert(ns->nr_open_zones > 0);
        ns->nr_open_zones -= 1;
    }

    assert(ns->nr_open_zones >= 0);
}
379 
/* Account one more active zone; only tracked when a limit is configured. */
static inline void nvme_aor_inc_active(NvmeNamespace *ns)
{
    assert(ns->nr_active_zones >= 0);

    if (!ns->params.max_active_zones) {
        return;
    }

    ns->nr_active_zones++;
    assert(ns->nr_active_zones <= ns->params.max_active_zones);
}
388 
/* Account one fewer active zone; only tracked when a limit is configured. */
static inline void nvme_aor_dec_active(NvmeNamespace *ns)
{
    if (ns->params.max_active_zones) {
        assert(ns->nr_active_zones > 0);
        ns->nr_active_zones -= 1;
        /* the active count can never drop below the open count */
        assert(ns->nr_active_zones >= ns->nr_open_zones);
    }

    assert(ns->nr_active_zones >= 0);
}
398 
/* Add @b to the statistic at *@a, saturating at UINT64_MAX on overflow. */
static inline void nvme_fdp_stat_inc(uint64_t *a, uint64_t b)
{
    uint64_t sum = *a + b;

    if (sum < *a) {
        /* unsigned addition wrapped -- clamp to the maximum */
        sum = UINT64_MAX;
    }

    *a = sum;
}
404 
405 void nvme_ns_init_format(NvmeNamespace *ns);
406 int nvme_ns_setup(NvmeNamespace *ns, Error **errp);
407 void nvme_ns_drain(NvmeNamespace *ns);
408 void nvme_ns_shutdown(NvmeNamespace *ns);
409 void nvme_ns_cleanup(NvmeNamespace *ns);
410 
/* Queued asynchronous event, pending delivery via an AER command. */
typedef struct NvmeAsyncEvent {
    QTAILQ_ENTRY(NvmeAsyncEvent) entry; /* linkage on NvmeCtrl.aer_queue */
    NvmeAerResult result;
} NvmeAsyncEvent;
415 
/* Flags for NvmeSg.flags */
enum {
    NVME_SG_ALLOC = 1 << 0, /* sg state has been initialized */
    NVME_SG_DMA   = 1 << 1, /* transfer is DMA (qsg) rather than iov */
};
420 
/*
 * Scatter/gather state for a transfer; holds either a QEMUSGList or a
 * QEMUIOVector, discriminated by the NVME_SG_DMA flag.
 */
typedef struct NvmeSg {
    int flags;

    union {
        QEMUSGList   qsg;
        QEMUIOVector iov;
    };
} NvmeSg;
429 
/* Direction of a host <-> device data transfer. */
typedef enum NvmeTxDirection {
    NVME_TX_DIRECTION_TO_DEVICE   = 0,
    NVME_TX_DIRECTION_FROM_DEVICE = 1,
} NvmeTxDirection;
434 
/* State of a single outstanding command. */
typedef struct NvmeRequest {
    struct NvmeSQueue       *sq;    /* queue the command was submitted on */
    struct NvmeNamespace    *ns;
    BlockAIOCB              *aiocb;
    uint16_t                status;
    void                    *opaque;
    NvmeCqe                 cqe;    /* completion entry being built */
    NvmeCmd                 cmd;    /* copy of the submission entry */
    BlockAcctCookie         acct;
    NvmeSg                  sg;
    bool                    atomic_write;
    QTAILQ_ENTRY(NvmeRequest)entry;
} NvmeRequest;
448 
/* Bounce buffers (and iovecs) for a request's data and metadata. */
typedef struct NvmeBounceContext {
    NvmeRequest *req;

    struct {
        QEMUIOVector iov;
        uint8_t *bounce;
    } data, mdata;
} NvmeBounceContext;
457 
/* Map an admin command opcode to a human-readable name (for tracing). */
static inline const char *nvme_adm_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_ADM_CMD_DELETE_SQ:        return "NVME_ADM_CMD_DELETE_SQ";
    case NVME_ADM_CMD_CREATE_SQ:        return "NVME_ADM_CMD_CREATE_SQ";
    case NVME_ADM_CMD_GET_LOG_PAGE:     return "NVME_ADM_CMD_GET_LOG_PAGE";
    case NVME_ADM_CMD_DELETE_CQ:        return "NVME_ADM_CMD_DELETE_CQ";
    case NVME_ADM_CMD_CREATE_CQ:        return "NVME_ADM_CMD_CREATE_CQ";
    case NVME_ADM_CMD_IDENTIFY:         return "NVME_ADM_CMD_IDENTIFY";
    case NVME_ADM_CMD_ABORT:            return "NVME_ADM_CMD_ABORT";
    case NVME_ADM_CMD_SET_FEATURES:     return "NVME_ADM_CMD_SET_FEATURES";
    case NVME_ADM_CMD_GET_FEATURES:     return "NVME_ADM_CMD_GET_FEATURES";
    case NVME_ADM_CMD_ASYNC_EV_REQ:     return "NVME_ADM_CMD_ASYNC_EV_REQ";
    case NVME_ADM_CMD_NS_ATTACHMENT:    return "NVME_ADM_CMD_NS_ATTACHMENT";
    case NVME_ADM_CMD_DIRECTIVE_SEND:   return "NVME_ADM_CMD_DIRECTIVE_SEND";
    case NVME_ADM_CMD_VIRT_MNGMT:       return "NVME_ADM_CMD_VIRT_MNGMT";
    case NVME_ADM_CMD_DIRECTIVE_RECV:   return "NVME_ADM_CMD_DIRECTIVE_RECV";
    case NVME_ADM_CMD_DBBUF_CONFIG:     return "NVME_ADM_CMD_DBBUF_CONFIG";
    case NVME_ADM_CMD_FORMAT_NVM:       return "NVME_ADM_CMD_FORMAT_NVM";
    case NVME_ADM_CMD_SECURITY_SEND:    return "NVME_ADM_CMD_SECURITY_SEND";
    case NVME_ADM_CMD_SECURITY_RECV:    return "NVME_ADM_CMD_SECURITY_RECV";
    default:                            return "NVME_ADM_CMD_UNKNOWN";
    }
}
482 
/* Map an I/O command opcode to a human-readable name (for tracing). */
static inline const char *nvme_io_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_CMD_FLUSH:            return "NVME_NVM_CMD_FLUSH";
    case NVME_CMD_WRITE:            return "NVME_NVM_CMD_WRITE";
    case NVME_CMD_READ:             return "NVME_NVM_CMD_READ";
    case NVME_CMD_COMPARE:          return "NVME_NVM_CMD_COMPARE";
    case NVME_CMD_WRITE_ZEROES:     return "NVME_NVM_CMD_WRITE_ZEROES";
    case NVME_CMD_DSM:              return "NVME_NVM_CMD_DSM";
    case NVME_CMD_VERIFY:           return "NVME_NVM_CMD_VERIFY";
    case NVME_CMD_COPY:             return "NVME_NVM_CMD_COPY";
    case NVME_CMD_ZONE_MGMT_SEND:   return "NVME_ZONED_CMD_MGMT_SEND";
    case NVME_CMD_ZONE_MGMT_RECV:   return "NVME_ZONED_CMD_MGMT_RECV";
    case NVME_CMD_ZONE_APPEND:      return "NVME_ZONED_CMD_ZONE_APPEND";
    default:                        return "NVME_NVM_CMD_UNKNOWN";
    }
}
500 
/* Submission queue state. */
typedef struct NvmeSQueue {
    struct NvmeCtrl *ctrl;
    uint16_t    sqid;
    uint16_t    cqid;       /* completion queue this SQ posts to */
    uint32_t    head;
    uint32_t    tail;
    uint32_t    size;
    uint64_t    dma_addr;
    uint64_t    db_addr;    /* doorbell buffer address (DBBUF_CONFIG) */
    uint64_t    ei_addr;    /* event index address (DBBUF_CONFIG) */
    QEMUBH      *bh;
    EventNotifier notifier;
    bool        ioeventfd_enabled;
    NvmeRequest *io_req;    /* request pool for this queue */
    QTAILQ_HEAD(, NvmeRequest) req_list;
    QTAILQ_HEAD(, NvmeRequest) out_req_list;
    QTAILQ_ENTRY(NvmeSQueue) entry;
} NvmeSQueue;
519 
/* Completion queue state. */
typedef struct NvmeCQueue {
    struct NvmeCtrl *ctrl;
    uint8_t     phase;      /* current phase tag */
    uint16_t    cqid;
    uint16_t    irq_enabled;
    uint32_t    head;
    uint32_t    tail;
    uint32_t    vector;     /* interrupt vector */
    uint32_t    size;
    uint64_t    dma_addr;
    uint64_t    db_addr;    /* doorbell buffer address (DBBUF_CONFIG) */
    uint64_t    ei_addr;    /* event index address (DBBUF_CONFIG) */
    QEMUBH      *bh;
    EventNotifier notifier;
    bool        ioeventfd_enabled;
    QTAILQ_HEAD(, NvmeSQueue) sq_list;  /* submission queues posting here */
    QTAILQ_HEAD(, NvmeRequest) req_list;
} NvmeCQueue;
538 
539 #define TYPE_NVME "nvme"
540 #define NVME(obj) \
541         OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)
542 
/*
 * User-configurable controller parameters.
 *
 * Field alignment is normalized here; the original mixed single and
 * double spacing on the sriov_* fields.
 */
typedef struct NvmeParams {
    char     *serial;
    uint32_t num_queues; /* deprecated since 5.1 */
    uint32_t max_ioqpairs;
    uint16_t msix_qsize;
    uint16_t mqes;
    uint32_t cmb_size_mb;   /* Controller Memory Buffer size in MiB */
    uint8_t  aerl;
    uint32_t aer_max_queued;
    uint8_t  mdts;
    uint8_t  vsl;
    bool     use_intel_id;
    uint8_t  zasl;
    bool     auto_transition_zones;
    bool     legacy_cmb;
    bool     ioeventfd;
    bool     dbcs;
    /* SR-IOV virtualization management parameters */
    uint16_t sriov_max_vfs;
    uint16_t sriov_vq_flexible;
    uint16_t sriov_vi_flexible;
    uint32_t sriov_max_vq_per_vf;
    uint32_t sriov_max_vi_per_vf;
    bool     msix_exclusive_bar;
    bool     ocp;

    struct {
        bool mem;
    } ctratt;

    /* controller-level atomic write parameters */
    uint16_t atomic_awun;
    uint16_t atomic_awupf;
    bool     atomic_dn;
} NvmeParams;
576 
/* The NVMe controller device state (one PCI function). */
typedef struct NvmeCtrl {
    PCIDevice    parent_obj;
    MemoryRegion bar0;
    MemoryRegion iomem;
    NvmeBar      bar;           /* controller register contents */
    NvmeParams   params;
    NvmeBus      bus;

    uint16_t    cntlid;
    bool        qs_created;
    uint32_t    page_size;
    uint16_t    page_bits;
    uint16_t    max_prp_ents;
    uint32_t    max_q_ents;
    uint8_t     outstanding_aers;
    uint32_t    irq_status;
    int         cq_pending;
    uint64_t    host_timestamp;                 /* Timestamp sent by the host */
    uint64_t    timestamp_set_qemu_clock_ms;    /* QEMU clock time */
    uint64_t    starttime_ms;
    uint16_t    temperature;
    uint8_t     smart_critical_warning;
    uint32_t    conf_msix_qsize;
    uint32_t    conf_ioqpairs;
    uint64_t    dbbuf_dbs;      /* doorbell buffer (DBBUF_CONFIG) addresses */
    uint64_t    dbbuf_eis;
    bool        dbbuf_enabled;

    /* per-opcode command effects for the admin and I/O command sets */
    struct {
        uint32_t acs[256];
        struct {
            uint32_t nvm[256];
            uint32_t zoned[256];
        } iocs;
    } cse;

    /* Controller Memory Buffer */
    struct {
        MemoryRegion mem;
        uint8_t      *buf;
        bool         cmse;
        hwaddr       cba;
    } cmb;

    /* Persistent Memory Region */
    struct {
        HostMemoryBackend *dev;
        bool              cmse;
        hwaddr            cba;
    } pmr;

    uint8_t     aer_mask;
    NvmeRequest **aer_reqs;
    QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue;
    int         aer_queued;

    uint32_t    dmrsl;

    /* Namespace ID is started with 1 so bitmap should be 1-based */
#define NVME_CHANGED_NSID_SIZE  (NVME_MAX_NAMESPACES + 1)
    DECLARE_BITMAP(changed_nsids, NVME_CHANGED_NSID_SIZE);

    NvmeSubsystem   *subsys;

    NvmeNamespace   namespace;
    /* indexed by NSID; NSIDs start at 1, so slot 0 is unused */
    NvmeNamespace   *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeSQueue      **sq;
    NvmeCQueue      **cq;
    NvmeSQueue      admin_sq;
    NvmeCQueue      admin_cq;
    NvmeIdCtrl      id_ctrl;

    struct {
        struct {
            uint16_t temp_thresh_hi;
            uint16_t temp_thresh_low;
        };

        uint32_t                async_config;
        NvmeHostBehaviorSupport hbs;
    } features;

    /* SR-IOV primary/secondary controller state */
    NvmePriCtrlCap  pri_ctrl_cap;
    uint32_t nr_sec_ctrls;
    NvmeSecCtrlEntry *sec_ctrl_list;
    struct {
        uint16_t    vqrfap;
        uint16_t    virfap;
    } next_pri_ctrl_cap;    /* These override pri_ctrl_cap after reset */
    uint32_t    dn; /* Disable Normal */
    NvmeAtomic  atomic;

    /* Socket mapping to SPDM over NVMe Security In/Out commands */
    int spdm_socket;
} NvmeCtrl;
670 
/* Scope of a controller reset. */
typedef enum NvmeResetType {
    NVME_RESET_FUNCTION   = 0,
    NVME_RESET_CONTROLLER = 1,
} NvmeResetType;
675 
nvme_ns(NvmeCtrl * n,uint32_t nsid)676 static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid)
677 {
678     if (!nsid || nsid > NVME_MAX_NAMESPACES) {
679         return NULL;
680     }
681 
682     return n->namespaces[nsid];
683 }
684 
nvme_cq(NvmeRequest * req)685 static inline NvmeCQueue *nvme_cq(NvmeRequest *req)
686 {
687     NvmeSQueue *sq = req->sq;
688     NvmeCtrl *n = sq->ctrl;
689 
690     return n->cq[sq->cqid];
691 }
692 
nvme_ctrl(NvmeRequest * req)693 static inline NvmeCtrl *nvme_ctrl(NvmeRequest *req)
694 {
695     NvmeSQueue *sq = req->sq;
696     return sq->ctrl;
697 }
698 
/*
 * Command identifier of @req in host byte order; 0xffff when @req is
 * NULL.
 */
static inline uint16_t nvme_cid(NvmeRequest *req)
{
    return req ? le16_to_cpu(req->cqe.cid) : 0xffff;
}
707 
nvme_sctrl(NvmeCtrl * n)708 static inline NvmeSecCtrlEntry *nvme_sctrl(NvmeCtrl *n)
709 {
710     PCIDevice *pci_dev = &n->parent_obj;
711     NvmeCtrl *pf = NVME(pcie_sriov_get_pf(pci_dev));
712 
713     if (pci_is_vf(pci_dev)) {
714         return &pf->sec_ctrl_list[pcie_sriov_vf_number(pci_dev)];
715     }
716 
717     return NULL;
718 }
719 
nvme_sctrl_for_cntlid(NvmeCtrl * n,uint16_t cntlid)720 static inline NvmeSecCtrlEntry *nvme_sctrl_for_cntlid(NvmeCtrl *n,
721                                                       uint16_t cntlid)
722 {
723     NvmeSecCtrlEntry *list = n->sec_ctrl_list;
724     uint8_t i;
725 
726     for (i = 0; i < n->nr_sec_ctrls; i++) {
727         if (le16_to_cpu(list[i].scid) == cntlid) {
728             return &list[i];
729         }
730     }
731 
732     return NULL;
733 }
734 
735 void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns);
736 uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len,
737                           NvmeTxDirection dir, NvmeRequest *req);
738 uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len,
739                            NvmeTxDirection dir, NvmeRequest *req);
740 void nvme_rw_complete_cb(void *opaque, int ret);
741 uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
742                        NvmeCmd *cmd);
743 
744 void nvme_atomic_configure_max_write_size(bool dn, uint16_t awun,
745                                           uint16_t awupf, NvmeAtomic *atomic);
746 void nvme_ns_atomic_configure_boundary(bool dn, uint16_t nabsn,
747                                        uint16_t nabspf, NvmeAtomic *atomic);
748 
749 #endif /* HW_NVME_NVME_H */
750