xref: /openbmc/qemu/hw/nvme/nvme.h (revision 44a9394b1d272b53306d097d4bc20ff7ad14b159)
1  /*
2   * QEMU NVM Express
3   *
4   * Copyright (c) 2012 Intel Corporation
5   * Copyright (c) 2021 Minwoo Im
6   * Copyright (c) 2021 Samsung Electronics Co., Ltd.
7   *
8   * Authors:
9   *   Keith Busch            <kbusch@kernel.org>
10   *   Klaus Jensen           <k.jensen@samsung.com>
11   *   Gollu Appalanaidu      <anaidu.gollu@samsung.com>
12   *   Dmitry Fomichev        <dmitry.fomichev@wdc.com>
13   *   Minwoo Im              <minwoo.im.dev@gmail.com>
14   *
15   * This code is licensed under the GNU GPL v2 or later.
16   */
17  
18  #ifndef HW_NVME_NVME_H
19  #define HW_NVME_NVME_H
20  
21  #include "qemu/uuid.h"
22  #include "hw/pci/pci_device.h"
23  #include "hw/block/block.h"
24  
25  #include "block/nvme.h"
26  
/* Fixed sizing limits for the emulated controller and subsystem */
#define NVME_MAX_CONTROLLERS 256
#define NVME_MAX_NAMESPACES  256
/* Default value used when an EUI-64 is autogenerated (see eui64_default) */
#define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000)
/* Capacity of one NvmeFdpEventBuffer ring (see struct below) */
#define NVME_FDP_MAX_EVENTS 63
/* Upper bound on FDP placement identifiers */
#define NVME_FDP_MAXPIDS 128

/*
 * The controller only supports Submission and Completion Queue Entry Sizes of
 * 64 and 16 bytes respectively.
 */
#define NVME_SQES 6
#define NVME_CQES 4

/* NSIDs are 1-based; the broadcast NSID must remain outside the valid range */
QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1);
41  
/* Forward declarations; full definitions appear later in this header */
typedef struct NvmeCtrl NvmeCtrl;
typedef struct NvmeNamespace NvmeNamespace;

#define TYPE_NVME_BUS "nvme-bus"
OBJECT_DECLARE_SIMPLE_TYPE(NvmeBus, NVME_BUS)
47  
/* Qdev bus used by NvmeCtrl and NvmeSubsystem to parent namespace devices */
typedef struct NvmeBus {
    BusState parent_bus;
} NvmeBus;
51  
#define TYPE_NVME_SUBSYS "nvme-subsys"
#define NVME_SUBSYS(obj) \
    OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
/*
 * Sentinel stored in NvmeSubsystem.ctrls[] to mark a cntlid slot as reserved
 * but not holding a live controller; nvme_subsys_ctrl() treats it as absent.
 */
#define SUBSYS_SLOT_RSVD (void *)0xFFFF
56  
/* Per-reclaim-unit state (FDP) */
typedef struct NvmeReclaimUnit {
    uint64_t ruamw;     /* Reclaim Unit Available Media Writes */
} NvmeReclaimUnit;
60  
/* Reclaim Unit Handle state (NVMe Flexible Data Placement) */
typedef struct NvmeRuHandle {
    uint8_t  ruht;          /* reclaim unit handle type */
    uint8_t  ruha;          /* reclaim unit handle attributes */
    uint64_t event_filter;  /* bitmask of enabled FDP events for this handle */
    uint8_t  lbafi;         /* LBA format index in use */
    uint64_t ruamw;         /* available media writes of the referenced RU */

    /* reclaim units indexed by reclaim group */
    NvmeReclaimUnit *rus;
} NvmeRuHandle;
71  
/* Fixed-capacity ring buffer of FDP events */
typedef struct NvmeFdpEventBuffer {
    NvmeFdpEvent     events[NVME_FDP_MAX_EVENTS];
    unsigned int     nelems;   /* number of valid events in the ring */
    unsigned int     start;    /* index of the oldest event */
    unsigned int     next;     /* index where the next event is stored */
} NvmeFdpEventBuffer;
78  
/* Endurance group state; currently only carries FDP configuration/statistics */
typedef struct NvmeEnduranceGroup {
    uint8_t event_conf;     /* FDP event notification configuration */

    struct {
        /* separate rings for host-visible and controller-internal events */
        NvmeFdpEventBuffer host_events, ctrl_events;

        uint16_t nruh;      /* number of reclaim unit handles */
        uint16_t nrg;       /* number of reclaim groups */
        uint8_t  rgif;      /* reclaim group identifier format (bits of PID) */
        uint64_t runs;      /* reclaim unit nominal size (bytes) */

        /* wear statistics -- NOTE(review): presumed spec HBMW/MBMW/MBE */
        uint64_t hbmw;      /* host bytes with metadata written */
        uint64_t mbmw;      /* media bytes with metadata written */
        uint64_t mbe;       /* media bytes erased */

        bool enabled;       /* FDP mode enabled for this endurance group */

        NvmeRuHandle *ruhs; /* array of nruh reclaim unit handles */
    } fdp;
} NvmeEnduranceGroup;
99  
/*
 * NVM subsystem: groups controllers and (shared) namespaces, and owns the
 * single endurance group used for FDP.
 */
typedef struct NvmeSubsystem {
    DeviceState parent_obj;
    NvmeBus     bus;
    uint8_t     subnqn[256];    /* NVMe Qualified Name of the subsystem */
    char        *serial;

    /* indexed by cntlid; entries may be NULL or SUBSYS_SLOT_RSVD */
    NvmeCtrl           *ctrls[NVME_MAX_CONTROLLERS];
    /* indexed by nsid (1-based); slot 0 unused */
    NvmeNamespace      *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeEnduranceGroup endgrp;

    struct {
        char *nqn;              /* user-provided NQN override */

        struct {
            bool     enabled;
            uint64_t runs;
            uint16_t nruh;
            uint32_t nrg;
        } fdp;
    } params;
} NvmeSubsystem;
121  
/* Attach/detach a controller to/from its subsystem's ctrls[] table */
int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n);
124  
nvme_subsys_ctrl(NvmeSubsystem * subsys,uint32_t cntlid)125  static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
126                                           uint32_t cntlid)
127  {
128      if (!subsys || cntlid >= NVME_MAX_CONTROLLERS) {
129          return NULL;
130      }
131  
132      if (subsys->ctrls[cntlid] == SUBSYS_SLOT_RSVD) {
133          return NULL;
134      }
135  
136      return subsys->ctrls[cntlid];
137  }
138  
nvme_subsys_ns(NvmeSubsystem * subsys,uint32_t nsid)139  static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys,
140                                              uint32_t nsid)
141  {
142      if (!subsys || !nsid || nsid > NVME_MAX_NAMESPACES) {
143          return NULL;
144      }
145  
146      return subsys->namespaces[nsid];
147  }
148  
/* Qdev type/cast macros for namespace devices */
#define TYPE_NVME_NS "nvme-ns"
#define NVME_NS(obj) \
    OBJECT_CHECK(NvmeNamespace, (obj), TYPE_NVME_NS)
152  
/* One zone of a zoned namespace */
typedef struct NvmeZone {
    NvmeZoneDescr   d;      /* on-the-wire zone descriptor */
    uint64_t        w_ptr;  /* emulated write pointer */
    QTAILQ_ENTRY(NvmeZone) entry;   /* linkage on a per-state zone list */
} NvmeZone;
158  
/* Event type values fit in one byte, so the shift table has 0xff slots */
#define FDP_EVT_MAX 0xff
#define NVME_FDP_MAX_NS_RUHS 32u
#define FDPVSS 0    /* FDP vendor specific size (none) */
162  
/*
 * Maps an FDP event type to its bit position in an NvmeRuHandle event_filter;
 * host events occupy the low 32 bits, controller events the high 32 bits.
 */
static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = {
    /* Host events */
    [FDP_EVT_RU_NOT_FULLY_WRITTEN]      = 0,
    [FDP_EVT_RU_ATL_EXCEEDED]           = 1,
    [FDP_EVT_CTRL_RESET_RUH]            = 2,
    [FDP_EVT_INVALID_PID]               = 3,
    /* CTRL events */
    [FDP_EVT_MEDIA_REALLOC]             = 32,
    [FDP_EVT_RUH_IMPLICIT_RU_CHANGE]    = 33,
};
173  
/* Namespace Globally Unique Identifier (16 bytes) and its qdev property */
#define NGUID_LEN 16

typedef struct {
    uint8_t data[NGUID_LEN];
} NvmeNGUID;

/* true when all 16 bytes of @nguid are zero */
bool nvme_nguid_is_null(const NvmeNGUID *nguid);

extern const PropertyInfo qdev_prop_nguid;

#define DEFINE_PROP_NGUID_NODEFAULT(_name, _state, _field) \
    DEFINE_PROP(_name, _state, _field, qdev_prop_nguid, NvmeNGUID)
186  
/* User-configurable properties of an nvme-ns device */
typedef struct NvmeNamespaceParams {
    bool      detached;     /* start detached from the controller */
    bool      shared;       /* namespace is shared across the subsystem */
    uint32_t  nsid;         /* 1-based namespace id (0 = auto-assign) */
    QemuUUID  uuid;
    NvmeNGUID nguid;
    uint64_t  eui64;
    bool      eui64_default;    /* generate an EUI-64 from NVME_EUI64_DEFAULT */

    /* metadata / end-to-end protection format settings */
    uint16_t ms;            /* metadata size per LBA in bytes */
    uint8_t  mset;          /* metadata transferred extended (1) or separate */
    uint8_t  pi;            /* protection information type */
    uint8_t  pil;           /* protection information location */
    uint8_t  pif;           /* protection information format */

    /* copy command limits */
    uint16_t mssrl;         /* max single source range length */
    uint32_t mcl;           /* max copy length */
    uint8_t  msrc;          /* max source ranges per command */

    /* zoned namespace configuration */
    bool     zoned;
    bool     cross_zone_read;
    uint64_t zone_size_bs;
    uint64_t zone_cap_bs;
    uint32_t max_active_zones;
    uint32_t max_open_zones;
    uint32_t zd_extension_size;

    /* zone random write area configuration */
    uint32_t numzrwa;
    uint64_t zrwas;
    uint64_t zrwafg;

    struct {
        char *ruhs;         /* reclaim unit handle spec string (FDP) */
    } fdp;
} NvmeNamespaceParams;
222  
/* Derived atomic-write configuration of the controller */
typedef struct NvmeAtomic {
    uint32_t    atomic_max_write_size;  /* largest write treated as atomic */
    bool        atomic_writes;          /* atomic write support enabled */
} NvmeAtomic;
227  
/* Runtime state of one namespace (an nvme-ns device) */
typedef struct NvmeNamespace {
    DeviceState  parent_obj;
    BlockConf    blkconf;       /* backing block device configuration */
    int32_t      bootindex;
    int64_t      size;          /* data size of the backing image in bytes */
    int64_t      moff;          /* byte offset of the metadata region */
    NvmeIdNs     id_ns;         /* Identify Namespace data structure */
    NvmeIdNsNvm  id_ns_nvm;
    NvmeIdNsInd  id_ns_ind;
    NvmeLBAF     lbaf;          /* the currently active LBA format */
    unsigned int nlbaf;         /* number of supported LBA formats */
    size_t       lbasz;         /* logical block size in bytes */
    const uint32_t *iocs;       /* supported I/O command set opcodes */
    uint8_t      csi;           /* command set identifier */
    uint16_t     status;
    int          attached;      /* number of controllers attached */
    uint8_t      pif;

    /* zone random write area state */
    struct {
        uint16_t zrwas;
        uint16_t zrwafg;
        uint32_t numzrwa;
    } zns;

    QTAILQ_ENTRY(NvmeNamespace) entry;

    /* zoned namespace state; zones are kept on per-state lists below */
    NvmeIdNsZoned   *id_ns_zoned;
    NvmeZone        *zone_array;
    QTAILQ_HEAD(, NvmeZone) exp_open_zones;
    QTAILQ_HEAD(, NvmeZone) imp_open_zones;
    QTAILQ_HEAD(, NvmeZone) closed_zones;
    QTAILQ_HEAD(, NvmeZone) full_zones;
    uint32_t        num_zones;
    uint64_t        zone_size;
    uint64_t        zone_capacity;
    uint32_t        zone_size_log2;     /* log2(zone_size) if a power of two */
    uint8_t         *zd_extensions;     /* zone descriptor extension storage */
    int32_t         nr_open_zones;      /* accounted by nvme_aor_* helpers */
    int32_t         nr_active_zones;

    NvmeNamespaceParams params;
    NvmeSubsystem *subsys;
    NvmeEnduranceGroup *endgrp;

    struct {
        uint32_t err_rec;       /* error recovery feature value */
    } features;

    struct {
        uint16_t nphs;          /* number of placement handles */
        /* reclaim unit handle identifiers indexed by placement handle */
        uint16_t *phs;
    } fdp;
} NvmeNamespace;
282  
/* NSID of @ns, or 0 (the invalid NSID) when @ns is NULL */
static inline uint32_t nvme_nsid(NvmeNamespace *ns)
{
    return ns ? ns->params.nsid : 0;
}
291  
/* Convert a logical block count/address to bytes for the active LBA format */
static inline size_t nvme_l2b(NvmeNamespace *ns, uint64_t lba)
{
    uint64_t bytes = lba << ns->lbaf.ds;

    return bytes;
}
296  
/* Metadata bytes occupied by @lba logical blocks in the active LBA format */
static inline size_t nvme_m2b(NvmeNamespace *ns, uint64_t lba)
{
    return lba * ns->lbaf.ms;
}
301  
/* Byte offset of the metadata belonging to logical block @lba */
static inline int64_t nvme_moff(NvmeNamespace *ns, uint64_t lba)
{
    return nvme_m2b(ns, lba) + ns->moff;
}
306  
/* true when the namespace is formatted with extended (interleaved) metadata */
static inline bool nvme_ns_ext(NvmeNamespace *ns)
{
    return NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas) != 0;
}
311  
nvme_get_zone_state(NvmeZone * zone)312  static inline NvmeZoneState nvme_get_zone_state(NvmeZone *zone)
313  {
314      return zone->d.zs >> 4;
315  }
316  
/* Store @state into the upper nibble of the descriptor's zs field */
static inline void nvme_set_zone_state(NvmeZone *zone, NvmeZoneState state)
{
    zone->d.zs = state << 4;
}
321  
/* First LBA past the readable range of @zone (zone start + zone size) */
static inline uint64_t nvme_zone_rd_boundary(NvmeNamespace *ns, NvmeZone *zone)
{
    return ns->zone_size + zone->d.zslba;
}
326  
/* First LBA past the writable range of @zone (zone start + zone capacity) */
static inline uint64_t nvme_zone_wr_boundary(NvmeZone *zone)
{
    return zone->d.zcap + zone->d.zslba;
}
331  
/* Does @zone's state have a meaningful write pointer? */
static inline bool nvme_wp_is_valid(NvmeZone *zone)
{
    switch (nvme_get_zone_state(zone)) {
    case NVME_ZONE_STATE_FULL:
    case NVME_ZONE_STATE_READ_ONLY:
    case NVME_ZONE_STATE_OFFLINE:
        return false;
    default:
        return true;
    }
}
340  
/* Pointer to the zone descriptor extension of the zone at @zone_idx */
static inline uint8_t *nvme_get_zd_extension(NvmeNamespace *ns,
                                             uint32_t zone_idx)
{
    uint32_t offset = zone_idx * ns->params.zd_extension_size;

    return ns->zd_extensions + offset;
}
346  
/* Account one more open zone; only enforced when a limit is configured */
static inline void nvme_aor_inc_open(NvmeNamespace *ns)
{
    assert(ns->nr_open_zones >= 0);

    if (!ns->params.max_open_zones) {
        return;
    }

    ns->nr_open_zones++;
    assert(ns->nr_open_zones <= ns->params.max_open_zones);
}
355  
/* Account one fewer open zone; only tracked when a limit is configured */
static inline void nvme_aor_dec_open(NvmeNamespace *ns)
{
    if (ns->params.max_open_zones != 0) {
        assert(ns->nr_open_zones > 0);
        --ns->nr_open_zones;
    }

    assert(ns->nr_open_zones >= 0);
}
364  
/* Account one more active zone; only enforced when a limit is configured */
static inline void nvme_aor_inc_active(NvmeNamespace *ns)
{
    assert(ns->nr_active_zones >= 0);

    if (!ns->params.max_active_zones) {
        return;
    }

    ns->nr_active_zones++;
    assert(ns->nr_active_zones <= ns->params.max_active_zones);
}
373  
/*
 * Account one fewer active zone; open zones are a subset of active zones,
 * so the active count may never drop below the open count.
 */
static inline void nvme_aor_dec_active(NvmeNamespace *ns)
{
    if (ns->params.max_active_zones != 0) {
        assert(ns->nr_active_zones > 0);
        --ns->nr_active_zones;
        assert(ns->nr_active_zones >= ns->nr_open_zones);
    }

    assert(ns->nr_active_zones >= 0);
}
383  
/* Saturating add for FDP statistics: clamp to UINT64_MAX instead of wrapping */
static inline void nvme_fdp_stat_inc(uint64_t *a, uint64_t b)
{
    uint64_t sum = *a + b;

    if (sum < *a) {
        sum = UINT64_MAX;
    }

    *a = sum;
}
389  
/* Namespace lifecycle helpers (implemented in the nvme-ns device code) */
void nvme_ns_init_format(NvmeNamespace *ns);
int nvme_ns_setup(NvmeNamespace *ns, Error **errp);
void nvme_ns_drain(NvmeNamespace *ns);
void nvme_ns_shutdown(NvmeNamespace *ns);
void nvme_ns_cleanup(NvmeNamespace *ns);
395  
/* A queued asynchronous event awaiting an outstanding AER command */
typedef struct NvmeAsyncEvent {
    QTAILQ_ENTRY(NvmeAsyncEvent) entry;
    NvmeAerResult result;
} NvmeAsyncEvent;
400  
/* Flags for NvmeSg.flags */
enum {
    NVME_SG_ALLOC = 1 << 0,     /* the union member has been initialized */
    NVME_SG_DMA   = 1 << 1,     /* qsg (DMA) is active rather than iov */
};
405  
/* Scatter/gather state: either a DMA address list or a memory iovec */
typedef struct NvmeSg {
    int flags;  /* NVME_SG_ALLOC / NVME_SG_DMA; selects the union member */

    union {
        QEMUSGList   qsg;
        QEMUIOVector iov;
    };
} NvmeSg;
414  
/* Direction of a data transfer relative to the emulated device */
typedef enum NvmeTxDirection {
    NVME_TX_DIRECTION_TO_DEVICE   = 0,
    NVME_TX_DIRECTION_FROM_DEVICE = 1,
} NvmeTxDirection;
419  
/* Per-command state tracked from submission until completion is posted */
typedef struct NvmeRequest {
    struct NvmeSQueue       *sq;        /* submission queue of origin */
    struct NvmeNamespace    *ns;        /* namespace targeted by cmd */
    BlockAIOCB              *aiocb;     /* in-flight block layer request */
    uint16_t                status;     /* NVMe status code to complete with */
    void                    *opaque;    /* per-opcode scratch pointer */
    NvmeCqe                 cqe;        /* completion queue entry being built */
    NvmeCmd                 cmd;        /* copy of the submitted command */
    BlockAcctCookie         acct;       /* block accounting cookie */
    NvmeSg                  sg;         /* mapped data buffer */
    bool                    atomic_write;
    QTAILQ_ENTRY(NvmeRequest)entry;
} NvmeRequest;
433  
/* Bounce buffers for commands that must inspect data and metadata in host RAM */
typedef struct NvmeBounceContext {
    NvmeRequest *req;

    struct {
        QEMUIOVector iov;
        uint8_t *bounce;
    } data, mdata;
} NvmeBounceContext;
442  
/* Human-readable name of an admin command opcode, for tracing/logging */
static inline const char *nvme_adm_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_ADM_CMD_DELETE_SQ:        return "NVME_ADM_CMD_DELETE_SQ";
    case NVME_ADM_CMD_CREATE_SQ:        return "NVME_ADM_CMD_CREATE_SQ";
    case NVME_ADM_CMD_GET_LOG_PAGE:     return "NVME_ADM_CMD_GET_LOG_PAGE";
    case NVME_ADM_CMD_DELETE_CQ:        return "NVME_ADM_CMD_DELETE_CQ";
    case NVME_ADM_CMD_CREATE_CQ:        return "NVME_ADM_CMD_CREATE_CQ";
    case NVME_ADM_CMD_IDENTIFY:         return "NVME_ADM_CMD_IDENTIFY";
    case NVME_ADM_CMD_ABORT:            return "NVME_ADM_CMD_ABORT";
    case NVME_ADM_CMD_SET_FEATURES:     return "NVME_ADM_CMD_SET_FEATURES";
    case NVME_ADM_CMD_GET_FEATURES:     return "NVME_ADM_CMD_GET_FEATURES";
    case NVME_ADM_CMD_ASYNC_EV_REQ:     return "NVME_ADM_CMD_ASYNC_EV_REQ";
    case NVME_ADM_CMD_NS_ATTACHMENT:    return "NVME_ADM_CMD_NS_ATTACHMENT";
    case NVME_ADM_CMD_DIRECTIVE_SEND:   return "NVME_ADM_CMD_DIRECTIVE_SEND";
    case NVME_ADM_CMD_VIRT_MNGMT:       return "NVME_ADM_CMD_VIRT_MNGMT";
    case NVME_ADM_CMD_DIRECTIVE_RECV:   return "NVME_ADM_CMD_DIRECTIVE_RECV";
    case NVME_ADM_CMD_DBBUF_CONFIG:     return "NVME_ADM_CMD_DBBUF_CONFIG";
    case NVME_ADM_CMD_FORMAT_NVM:       return "NVME_ADM_CMD_FORMAT_NVM";
    default:                            return "NVME_ADM_CMD_UNKNOWN";
    }
}
465  
/* Human-readable name of an I/O command opcode, for tracing/logging */
static inline const char *nvme_io_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_CMD_FLUSH:            return "NVME_NVM_CMD_FLUSH";
    case NVME_CMD_WRITE:            return "NVME_NVM_CMD_WRITE";
    case NVME_CMD_READ:             return "NVME_NVM_CMD_READ";
    case NVME_CMD_COMPARE:          return "NVME_NVM_CMD_COMPARE";
    case NVME_CMD_WRITE_ZEROES:     return "NVME_NVM_CMD_WRITE_ZEROES";
    case NVME_CMD_DSM:              return "NVME_NVM_CMD_DSM";
    case NVME_CMD_VERIFY:           return "NVME_NVM_CMD_VERIFY";
    case NVME_CMD_COPY:             return "NVME_NVM_CMD_COPY";
    case NVME_CMD_ZONE_MGMT_SEND:   return "NVME_ZONED_CMD_MGMT_SEND";
    case NVME_CMD_ZONE_MGMT_RECV:   return "NVME_ZONED_CMD_MGMT_RECV";
    case NVME_CMD_ZONE_APPEND:      return "NVME_ZONED_CMD_ZONE_APPEND";
    default:                        return "NVME_NVM_CMD_UNKNOWN";
    }
}
483  
/* An emulated submission queue */
typedef struct NvmeSQueue {
    struct NvmeCtrl *ctrl;
    uint16_t    sqid;
    uint16_t    cqid;       /* completion queue this SQ posts to */
    uint32_t    head;
    uint32_t    tail;
    uint32_t    size;       /* number of entries */
    uint64_t    dma_addr;   /* guest physical address of the queue */
    uint64_t    db_addr;    /* shadow doorbell address (dbbuf) */
    uint64_t    ei_addr;    /* event index address (dbbuf) */
    QEMUBH      *bh;        /* bottom half that processes the queue */
    EventNotifier notifier;
    bool        ioeventfd_enabled;
    NvmeRequest *io_req;    /* preallocated request pool for this queue */
    QTAILQ_HEAD(, NvmeRequest) req_list;        /* free requests */
    QTAILQ_HEAD(, NvmeRequest) out_req_list;    /* requests in flight */
    QTAILQ_ENTRY(NvmeSQueue) entry;             /* linkage on the CQ's sq_list */
} NvmeSQueue;
502  
/* An emulated completion queue */
typedef struct NvmeCQueue {
    struct NvmeCtrl *ctrl;
    uint8_t     phase;          /* current phase tag */
    uint16_t    cqid;
    uint16_t    irq_enabled;
    uint32_t    head;
    uint32_t    tail;
    uint32_t    vector;         /* interrupt vector */
    uint32_t    size;           /* number of entries */
    uint64_t    dma_addr;       /* guest physical address of the queue */
    uint64_t    db_addr;        /* shadow doorbell address (dbbuf) */
    uint64_t    ei_addr;        /* event index address (dbbuf) */
    QEMUBH      *bh;            /* bottom half that posts completions */
    EventNotifier notifier;
    bool        ioeventfd_enabled;
    QTAILQ_HEAD(, NvmeSQueue) sq_list;      /* SQs mapped to this CQ */
    QTAILQ_HEAD(, NvmeRequest) req_list;    /* completions pending posting */
} NvmeCQueue;
521  
/* Qdev type/cast macros for the controller device */
#define TYPE_NVME "nvme"
#define NVME(obj) \
        OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)
525  
/* User-configurable properties of the nvme controller device */
typedef struct NvmeParams {
    char     *serial;
    uint32_t num_queues; /* deprecated since 5.1 */
    uint32_t max_ioqpairs;
    uint16_t msix_qsize;
    uint16_t mqes;              /* maximum queue entries supported (0-based) */
    uint32_t cmb_size_mb;       /* controller memory buffer size in MiB */
    uint8_t  aerl;              /* async event request limit (0-based) */
    uint32_t aer_max_queued;
    uint8_t  mdts;              /* max data transfer size (power of two) */
    uint8_t  vsl;
    bool     use_intel_id;      /* use legacy Intel PCI IDs */
    uint8_t  zasl;              /* zone append size limit */
    bool     auto_transition_zones;
    bool     legacy_cmb;
    bool     ioeventfd;
    /* SR-IOV virtualization management configuration */
    uint16_t  sriov_max_vfs;
    uint16_t sriov_vq_flexible;
    uint16_t sriov_vi_flexible;
    uint32_t  sriov_max_vq_per_vf;
    uint32_t  sriov_max_vi_per_vf;
    bool     msix_exclusive_bar;

    struct {
        bool mem;
    } ctratt;

    /* atomic write parameters */
    uint16_t atomic_awun;
    uint16_t atomic_awupf;
    bool     atomic_dn;
} NvmeParams;
557  
/* Device state of one emulated NVMe controller (a PCI function) */
typedef struct NvmeCtrl {
    PCIDevice    parent_obj;
    MemoryRegion bar0;
    MemoryRegion iomem;
    NvmeBar      bar;           /* register file (BAR0 contents) */
    NvmeParams   params;
    NvmeBus      bus;

    uint16_t    cntlid;         /* controller id within the subsystem */
    bool        qs_created;
    uint32_t    page_size;
    uint16_t    page_bits;
    uint16_t    max_prp_ents;
    uint32_t    max_q_ents;
    uint8_t     outstanding_aers;
    uint32_t    irq_status;
    int         cq_pending;
    uint64_t    host_timestamp;                 /* Timestamp sent by the host */
    uint64_t    timestamp_set_qemu_clock_ms;    /* QEMU clock time */
    uint64_t    starttime_ms;
    uint16_t    temperature;
    uint8_t     smart_critical_warning;
    uint32_t    conf_msix_qsize;
    uint32_t    conf_ioqpairs;
    /* doorbell buffer config (shadow doorbells / event indexes) */
    uint64_t    dbbuf_dbs;
    uint64_t    dbbuf_eis;
    bool        dbbuf_enabled;

    /* controller memory buffer backing state */
    struct {
        MemoryRegion mem;
        uint8_t      *buf;
        bool         cmse;
        hwaddr       cba;
    } cmb;

    /* persistent memory region backing state */
    struct {
        HostMemoryBackend *dev;
        bool              cmse;
        hwaddr            cba;
    } pmr;

    /* asynchronous event request bookkeeping */
    uint8_t     aer_mask;
    NvmeRequest **aer_reqs;
    QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue;
    int         aer_queued;

    uint32_t    dmrsl;

    /* Namespace ID is started with 1 so bitmap should be 1-based */
#define NVME_CHANGED_NSID_SIZE  (NVME_MAX_NAMESPACES + 1)
    DECLARE_BITMAP(changed_nsids, NVME_CHANGED_NSID_SIZE);

    NvmeSubsystem   *subsys;

    NvmeNamespace   namespace;      /* implicit namespace (non-subsys setup) */
    NvmeNamespace   *namespaces[NVME_MAX_NAMESPACES + 1];   /* 1-based by nsid */
    NvmeSQueue      **sq;
    NvmeCQueue      **cq;
    NvmeSQueue      admin_sq;
    NvmeCQueue      admin_cq;
    NvmeIdCtrl      id_ctrl;

    struct {
        struct {
            uint16_t temp_thresh_hi;
            uint16_t temp_thresh_low;
        };

        uint32_t                async_config;
        NvmeHostBehaviorSupport hbs;
    } features;

    /* SR-IOV primary/secondary controller state */
    NvmePriCtrlCap  pri_ctrl_cap;
    uint32_t nr_sec_ctrls;
    NvmeSecCtrlEntry *sec_ctrl_list;
    struct {
        uint16_t    vqrfap;
        uint16_t    virfap;
    } next_pri_ctrl_cap;    /* These override pri_ctrl_cap after reset */
    uint32_t    dn; /* Disable Normal */
    NvmeAtomic  atomic;
} NvmeCtrl;
640  
/* Distinguishes a PCI function-level reset from a controller reset */
typedef enum NvmeResetType {
    NVME_RESET_FUNCTION   = 0,
    NVME_RESET_CONTROLLER = 1,
} NvmeResetType;
645  
nvme_ns(NvmeCtrl * n,uint32_t nsid)646  static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid)
647  {
648      if (!nsid || nsid > NVME_MAX_NAMESPACES) {
649          return NULL;
650      }
651  
652      return n->namespaces[nsid];
653  }
654  
nvme_cq(NvmeRequest * req)655  static inline NvmeCQueue *nvme_cq(NvmeRequest *req)
656  {
657      NvmeSQueue *sq = req->sq;
658      NvmeCtrl *n = sq->ctrl;
659  
660      return n->cq[sq->cqid];
661  }
662  
nvme_ctrl(NvmeRequest * req)663  static inline NvmeCtrl *nvme_ctrl(NvmeRequest *req)
664  {
665      NvmeSQueue *sq = req->sq;
666      return sq->ctrl;
667  }
668  
/* Command identifier of @req, or 0xffff when there is no request */
static inline uint16_t nvme_cid(NvmeRequest *req)
{
    return req ? le16_to_cpu(req->cqe.cid) : 0xffff;
}
677  
nvme_sctrl(NvmeCtrl * n)678  static inline NvmeSecCtrlEntry *nvme_sctrl(NvmeCtrl *n)
679  {
680      PCIDevice *pci_dev = &n->parent_obj;
681      NvmeCtrl *pf = NVME(pcie_sriov_get_pf(pci_dev));
682  
683      if (pci_is_vf(pci_dev)) {
684          return &pf->sec_ctrl_list[pcie_sriov_vf_number(pci_dev)];
685      }
686  
687      return NULL;
688  }
689  
nvme_sctrl_for_cntlid(NvmeCtrl * n,uint16_t cntlid)690  static inline NvmeSecCtrlEntry *nvme_sctrl_for_cntlid(NvmeCtrl *n,
691                                                        uint16_t cntlid)
692  {
693      NvmeSecCtrlEntry *list = n->sec_ctrl_list;
694      uint8_t i;
695  
696      for (i = 0; i < n->nr_sec_ctrls; i++) {
697          if (le16_to_cpu(list[i].scid) == cntlid) {
698              return &list[i];
699          }
700      }
701  
702      return NULL;
703  }
704  
/* Controller-level helpers shared across the nvme implementation files */
void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns);
/* Copy data/metadata between @ptr and the buffers mapped for @req */
uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len,
                          NvmeTxDirection dir, NvmeRequest *req);
uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len,
                           NvmeTxDirection dir, NvmeRequest *req);
void nvme_rw_complete_cb(void *opaque, int ret);
/* Map the command's DPTR (PRP or SGL) into @sg for a transfer of @len bytes */
uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
                       NvmeCmd *cmd);
713  
714  #endif /* HW_NVME_NVME_H */
715