/*
 * QEMU NVM Express
 *
 * Copyright (c) 2012 Intel Corporation
 * Copyright (c) 2021 Minwoo Im
 * Copyright (c) 2021 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *   Keith Busch <kbusch@kernel.org>
 *   Klaus Jensen <k.jensen@samsung.com>
 *   Gollu Appalanaidu <anaidu.gollu@samsung.com>
 *   Dmitry Fomichev <dmitry.fomichev@wdc.com>
 *   Minwoo Im <minwoo.im.dev@gmail.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

#ifndef HW_NVME_NVME_H
#define HW_NVME_NVME_H

#include "qemu/uuid.h"
#include "hw/pci/pci_device.h"
#include "hw/block/block.h"

#include "block/nvme.h"

#define NVME_MAX_CONTROLLERS 256
#define NVME_MAX_NAMESPACES  256
#define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000)
#define NVME_FDP_MAX_EVENTS 63
#define NVME_FDP_MAXPIDS 128

/*
 * The controller only supports Submission and Completion Queue Entry Sizes
 * of 64 and 16 bytes respectively.
 */
#define NVME_SQES 6
#define NVME_CQES 4

QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1);

typedef struct NvmeCtrl NvmeCtrl;
typedef struct NvmeNamespace NvmeNamespace;

#define TYPE_NVME_BUS "nvme-bus"
OBJECT_DECLARE_SIMPLE_TYPE(NvmeBus, NVME_BUS)

typedef struct NvmeBus {
    BusState parent_bus;
} NvmeBus;

#define TYPE_NVME_SUBSYS "nvme-subsys"
#define NVME_SUBSYS(obj) \
    OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
#define SUBSYS_SLOT_RSVD (void *)0xFFFF

typedef struct NvmeReclaimUnit {
    uint64_t ruamw;
} NvmeReclaimUnit;

typedef struct NvmeRuHandle {
    uint8_t ruht;
    uint8_t ruha;
    uint64_t event_filter;
    uint8_t lbafi;
    uint64_t ruamw;

    /* reclaim units indexed by reclaim group */
    NvmeReclaimUnit *rus;
} NvmeRuHandle;

typedef struct NvmeFdpEventBuffer {
    NvmeFdpEvent events[NVME_FDP_MAX_EVENTS];
    unsigned int nelems;
    unsigned int start;
    unsigned int next;
} NvmeFdpEventBuffer;

typedef struct NvmeEnduranceGroup {
    uint8_t event_conf;

    struct {
        NvmeFdpEventBuffer host_events, ctrl_events;

        uint16_t nruh;
        uint16_t nrg;
        uint8_t rgif;
        uint64_t runs;

        uint64_t hbmw;
        uint64_t mbmw;
        uint64_t mbe;

        bool enabled;

        NvmeRuHandle *ruhs;
    } fdp;
} NvmeEnduranceGroup;

typedef struct NvmeSubsystem {
    DeviceState parent_obj;
    NvmeBus bus;
    uint8_t subnqn[256];
    char *serial;

    NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS];
    NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeEnduranceGroup endgrp;

    struct {
        char *nqn;

        struct {
            bool enabled;
            uint64_t runs;
            uint16_t nruh;
            uint32_t nrg;
        } fdp;
    } params;
} NvmeSubsystem;

int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n);

static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
                                         uint32_t cntlid)
{
    if (!subsys || cntlid >= NVME_MAX_CONTROLLERS) {
        return NULL;
    }

    if (subsys->ctrls[cntlid] == SUBSYS_SLOT_RSVD) {
        return NULL;
    }

    return subsys->ctrls[cntlid];
}

static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys,
                                            uint32_t nsid)
{
    if (!subsys || !nsid || nsid > NVME_MAX_NAMESPACES) {
        return NULL;
    }

    return subsys->namespaces[nsid];
}
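/*
 * Usage sketch (illustrative only, not part of this header): enumerating
 * the live controllers of a subsystem. nvme_subsys_ctrl() hides the
 * SUBSYS_SLOT_RSVD sentinel, so callers never see reserved slots:
 *
 *     for (uint32_t cntlid = 0; cntlid < NVME_MAX_CONTROLLERS; cntlid++) {
 *         NvmeCtrl *ctrl = nvme_subsys_ctrl(subsys, cntlid);
 *         if (ctrl) {
 *             ... operate on the live controller ...
 *         }
 *     }
 */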
#define TYPE_NVME_NS "nvme-ns"
#define NVME_NS(obj) \
    OBJECT_CHECK(NvmeNamespace, (obj), TYPE_NVME_NS)

typedef struct NvmeZone {
    NvmeZoneDescr d;
    uint64_t w_ptr;
    QTAILQ_ENTRY(NvmeZone) entry;
} NvmeZone;

#define FDP_EVT_MAX 0xff
#define NVME_FDP_MAX_NS_RUHS 32u
#define FDPVSS 0

static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = {
    /* Host events */
    [FDP_EVT_RU_NOT_FULLY_WRITTEN]   = 0,
    [FDP_EVT_RU_ATL_EXCEEDED]        = 1,
    [FDP_EVT_CTRL_RESET_RUH]         = 2,
    [FDP_EVT_INVALID_PID]            = 3,
    /* CTRL events */
    [FDP_EVT_MEDIA_REALLOC]          = 32,
    [FDP_EVT_RUH_IMPLICIT_RU_CHANGE] = 33,
};

#define NGUID_LEN 16

typedef struct {
    uint8_t data[NGUID_LEN];
} NvmeNGUID;

bool nvme_nguid_is_null(const NvmeNGUID *nguid);

extern const PropertyInfo qdev_prop_nguid;

#define DEFINE_PROP_NGUID_NODEFAULT(_name, _state, _field) \
    DEFINE_PROP(_name, _state, _field, qdev_prop_nguid, NvmeNGUID)

typedef struct NvmeNamespaceParams {
    bool detached;
    bool shared;
    uint32_t nsid;
    QemuUUID uuid;
    NvmeNGUID nguid;
    uint64_t eui64;
    bool eui64_default;

    uint16_t ms;
    uint8_t mset;
    uint8_t pi;
    uint8_t pil;
    uint8_t pif;

    uint16_t mssrl;
    uint32_t mcl;
    uint8_t msrc;

    bool zoned;
    bool cross_zone_read;
    uint64_t zone_size_bs;
    uint64_t zone_cap_bs;
    uint32_t max_active_zones;
    uint32_t max_open_zones;
    uint32_t zd_extension_size;

    uint32_t numzrwa;
    uint64_t zrwas;
    uint64_t zrwafg;

    struct {
        char *ruhs;
    } fdp;

    struct {
        uint16_t nawun;
        uint16_t nawupf;
        uint16_t nabsn;
        uint16_t nabspf;
        uint16_t nabo;
    } atomic;
} NvmeNamespaceParams;

typedef struct NvmeAtomic {
    uint32_t atomic_max_write_size;
    uint64_t atomic_boundary;
    uint64_t atomic_nabo;
    bool atomic_writes;
} NvmeAtomic;

typedef struct NvmeNamespace {
    DeviceState parent_obj;
    BlockConf blkconf;
    int32_t bootindex;
    int64_t size;
    int64_t moff;
    NvmeIdNs id_ns;
    NvmeIdNsNvm id_ns_nvm;
    NvmeIdNsInd id_ns_ind;
    NvmeLBAF lbaf;
    unsigned int nlbaf;
    size_t lbasz;
    uint8_t csi;
    uint16_t status;
    int attached;
    uint8_t pif;

    struct {
        uint16_t zrwas;
        uint16_t zrwafg;
        uint32_t numzrwa;
    } zns;

    QTAILQ_ENTRY(NvmeNamespace) entry;

    NvmeIdNsZoned *id_ns_zoned;
    NvmeZone *zone_array;
    QTAILQ_HEAD(, NvmeZone) exp_open_zones;
    QTAILQ_HEAD(, NvmeZone) imp_open_zones;
    QTAILQ_HEAD(, NvmeZone) closed_zones;
    QTAILQ_HEAD(, NvmeZone) full_zones;
    uint32_t num_zones;
    uint64_t zone_size;
    uint64_t zone_capacity;
    uint32_t zone_size_log2;
    uint8_t *zd_extensions;
    int32_t nr_open_zones;
    int32_t nr_active_zones;

    NvmeNamespaceParams params;
    NvmeSubsystem *subsys;
    NvmeEnduranceGroup *endgrp;

    /* NULL for shared namespaces; set to specific controller if private */
    NvmeCtrl *ctrl;

    struct {
        uint32_t err_rec;
    } features;

    struct {
        uint16_t nphs;
        /* reclaim unit handle identifiers indexed by placement handle */
        uint16_t *phs;
    } fdp;

    NvmeAtomic atomic;
} NvmeNamespace;

static inline uint32_t nvme_nsid(NvmeNamespace *ns)
{
    if (ns) {
        return ns->params.nsid;
    }

    return 0;
}

static inline size_t nvme_l2b(NvmeNamespace *ns, uint64_t lba)
{
    return lba << ns->lbaf.ds;
}

static inline size_t nvme_m2b(NvmeNamespace *ns, uint64_t lba)
{
    return ns->lbaf.ms * lba;
}

static inline int64_t nvme_moff(NvmeNamespace *ns, uint64_t lba)
{
    return ns->moff + nvme_m2b(ns, lba);
}
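/*
 * Worked example (illustrative): with a 4 KiB logical block size
 * (lbaf.ds == 12) and 16 bytes of separately stored metadata per block
 * (lbaf.ms == 16), the byte offsets for LBA 8 come out as:
 *
 *     nvme_l2b(ns, 8)  == 8 << 12 == 32768   data offset
 *     nvme_m2b(ns, 8)  == 16 * 8  == 128     offset within the metadata
 *                                            region
 *     nvme_moff(ns, 8) == ns->moff + 128     absolute metadata offset in
 *                                            the backing image
 */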
static inline bool nvme_ns_ext(NvmeNamespace *ns)
{
    return !!NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas);
}

static inline NvmeZoneState nvme_get_zone_state(NvmeZone *zone)
{
    return zone->d.zs >> 4;
}

static inline void nvme_set_zone_state(NvmeZone *zone, NvmeZoneState state)
{
    zone->d.zs = state << 4;
}

static inline uint64_t nvme_zone_rd_boundary(NvmeNamespace *ns, NvmeZone *zone)
{
    return zone->d.zslba + ns->zone_size;
}

static inline uint64_t nvme_zone_wr_boundary(NvmeZone *zone)
{
    return zone->d.zslba + zone->d.zcap;
}

static inline bool nvme_wp_is_valid(NvmeZone *zone)
{
    uint8_t st = nvme_get_zone_state(zone);

    return st != NVME_ZONE_STATE_FULL &&
           st != NVME_ZONE_STATE_READ_ONLY &&
           st != NVME_ZONE_STATE_OFFLINE;
}

static inline uint8_t *nvme_get_zd_extension(NvmeNamespace *ns,
                                             uint32_t zone_idx)
{
    return &ns->zd_extensions[zone_idx * ns->params.zd_extension_size];
}

static inline void nvme_aor_inc_open(NvmeNamespace *ns)
{
    assert(ns->nr_open_zones >= 0);
    if (ns->params.max_open_zones) {
        ns->nr_open_zones++;
        assert(ns->nr_open_zones <= ns->params.max_open_zones);
    }
}

static inline void nvme_aor_dec_open(NvmeNamespace *ns)
{
    if (ns->params.max_open_zones) {
        assert(ns->nr_open_zones > 0);
        ns->nr_open_zones--;
    }
    assert(ns->nr_open_zones >= 0);
}

static inline void nvme_aor_inc_active(NvmeNamespace *ns)
{
    assert(ns->nr_active_zones >= 0);
    if (ns->params.max_active_zones) {
        ns->nr_active_zones++;
        assert(ns->nr_active_zones <= ns->params.max_active_zones);
    }
}

static inline void nvme_aor_dec_active(NvmeNamespace *ns)
{
    if (ns->params.max_active_zones) {
        assert(ns->nr_active_zones > 0);
        ns->nr_active_zones--;
        assert(ns->nr_active_zones >= ns->nr_open_zones);
    }
    assert(ns->nr_active_zones >= 0);
}

/* saturating add: clamps the statistic to UINT64_MAX on overflow */
static inline void nvme_fdp_stat_inc(uint64_t *a, uint64_t b)
{
    uint64_t ret = *a + b;
    *a = ret < *a ? UINT64_MAX : ret;
}
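/*
 * Usage sketch (illustrative only): implicitly opening an empty zone on a
 * write. An empty zone is neither active nor open, so both counters are
 * bumped together, which keeps the nr_active_zones >= nr_open_zones
 * invariant asserted in nvme_aor_dec_active() intact:
 *
 *     if (nvme_get_zone_state(zone) == NVME_ZONE_STATE_EMPTY) {
 *         nvme_aor_inc_active(ns);
 *         nvme_aor_inc_open(ns);
 *         nvme_set_zone_state(zone, NVME_ZONE_STATE_IMPLICITLY_OPEN);
 *     }
 */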
void nvme_ns_init_format(NvmeNamespace *ns);
int nvme_ns_setup(NvmeNamespace *ns, Error **errp);
void nvme_ns_drain(NvmeNamespace *ns);
void nvme_ns_shutdown(NvmeNamespace *ns);
void nvme_ns_cleanup(NvmeNamespace *ns);

typedef struct NvmeAsyncEvent {
    QTAILQ_ENTRY(NvmeAsyncEvent) entry;
    NvmeAerResult result;
} NvmeAsyncEvent;

enum {
    NVME_SG_ALLOC = 1 << 0,
    NVME_SG_DMA   = 1 << 1,
};

typedef struct NvmeSg {
    int flags;

    union {
        QEMUSGList qsg;
        QEMUIOVector iov;
    };
} NvmeSg;

typedef enum NvmeTxDirection {
    NVME_TX_DIRECTION_TO_DEVICE   = 0,
    NVME_TX_DIRECTION_FROM_DEVICE = 1,
} NvmeTxDirection;

typedef struct NvmeRequest {
    struct NvmeSQueue *sq;
    struct NvmeNamespace *ns;
    BlockAIOCB *aiocb;
    uint16_t status;
    void *opaque;
    NvmeCqe cqe;
    NvmeCmd cmd;
    BlockAcctCookie acct;
    NvmeSg sg;
    bool atomic_write;
    QTAILQ_ENTRY(NvmeRequest) entry;
} NvmeRequest;

typedef struct NvmeBounceContext {
    NvmeRequest *req;

    struct {
        QEMUIOVector iov;
        uint8_t *bounce;
    } data, mdata;
} NvmeBounceContext;

static inline const char *nvme_adm_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_ADM_CMD_DELETE_SQ:      return "NVME_ADM_CMD_DELETE_SQ";
    case NVME_ADM_CMD_CREATE_SQ:      return "NVME_ADM_CMD_CREATE_SQ";
    case NVME_ADM_CMD_GET_LOG_PAGE:   return "NVME_ADM_CMD_GET_LOG_PAGE";
    case NVME_ADM_CMD_DELETE_CQ:      return "NVME_ADM_CMD_DELETE_CQ";
    case NVME_ADM_CMD_CREATE_CQ:      return "NVME_ADM_CMD_CREATE_CQ";
    case NVME_ADM_CMD_IDENTIFY:       return "NVME_ADM_CMD_IDENTIFY";
    case NVME_ADM_CMD_ABORT:          return "NVME_ADM_CMD_ABORT";
    case NVME_ADM_CMD_SET_FEATURES:   return "NVME_ADM_CMD_SET_FEATURES";
    case NVME_ADM_CMD_GET_FEATURES:   return "NVME_ADM_CMD_GET_FEATURES";
    case NVME_ADM_CMD_ASYNC_EV_REQ:   return "NVME_ADM_CMD_ASYNC_EV_REQ";
    case NVME_ADM_CMD_NS_ATTACHMENT:  return "NVME_ADM_CMD_NS_ATTACHMENT";
    case NVME_ADM_CMD_DIRECTIVE_SEND: return "NVME_ADM_CMD_DIRECTIVE_SEND";
    case NVME_ADM_CMD_VIRT_MNGMT:     return "NVME_ADM_CMD_VIRT_MNGMT";
    case NVME_ADM_CMD_DIRECTIVE_RECV: return "NVME_ADM_CMD_DIRECTIVE_RECV";
    case NVME_ADM_CMD_DBBUF_CONFIG:   return "NVME_ADM_CMD_DBBUF_CONFIG";
    case NVME_ADM_CMD_FORMAT_NVM:     return "NVME_ADM_CMD_FORMAT_NVM";
    case NVME_ADM_CMD_SECURITY_SEND:  return "NVME_ADM_CMD_SECURITY_SEND";
    case NVME_ADM_CMD_SECURITY_RECV:  return "NVME_ADM_CMD_SECURITY_RECV";
    default:                          return "NVME_ADM_CMD_UNKNOWN";
    }
}

static inline const char *nvme_io_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_CMD_FLUSH:          return "NVME_NVM_CMD_FLUSH";
    case NVME_CMD_WRITE:          return "NVME_NVM_CMD_WRITE";
    case NVME_CMD_READ:           return "NVME_NVM_CMD_READ";
    case NVME_CMD_COMPARE:        return "NVME_NVM_CMD_COMPARE";
    case NVME_CMD_WRITE_ZEROES:   return "NVME_NVM_CMD_WRITE_ZEROES";
    case NVME_CMD_DSM:            return "NVME_NVM_CMD_DSM";
    case NVME_CMD_VERIFY:         return "NVME_NVM_CMD_VERIFY";
    case NVME_CMD_COPY:           return "NVME_NVM_CMD_COPY";
    case NVME_CMD_ZONE_MGMT_SEND: return "NVME_ZONED_CMD_MGMT_SEND";
    case NVME_CMD_ZONE_MGMT_RECV: return "NVME_ZONED_CMD_MGMT_RECV";
    case NVME_CMD_ZONE_APPEND:    return "NVME_ZONED_CMD_ZONE_APPEND";
    default:                      return "NVME_NVM_CMD_UNKNOWN";
    }
}
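/*
 * Usage sketch (illustrative only): the *_opc_str() helpers exist for
 * trace/debug output, e.g. when an I/O command is dequeued; the trace
 * point and its argument list here are an assumption for illustration:
 *
 *     trace_pci_nvme_io_cmd(nvme_cid(req), ..., req->cmd.opcode,
 *                           nvme_io_opc_str(req->cmd.opcode));
 */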
typedef struct NvmeSQueue {
    struct NvmeCtrl *ctrl;
    uint16_t sqid;
    uint16_t cqid;
    uint32_t head;
    uint32_t tail;
    uint32_t size;
    uint64_t dma_addr;
    uint64_t db_addr; /* shadow doorbell (Doorbell Buffer Config) */
    uint64_t ei_addr; /* event index (Doorbell Buffer Config) */
    QEMUBH *bh;
    EventNotifier notifier;
    bool ioeventfd_enabled;
    NvmeRequest *io_req;
    QTAILQ_HEAD(, NvmeRequest) req_list;
    QTAILQ_HEAD(, NvmeRequest) out_req_list;
    QTAILQ_ENTRY(NvmeSQueue) entry;
} NvmeSQueue;

typedef struct NvmeCQueue {
    struct NvmeCtrl *ctrl;
    uint8_t phase; /* completion queue phase tag */
    uint16_t cqid;
    uint16_t irq_enabled;
    uint32_t head;
    uint32_t tail;
    uint32_t vector;
    uint32_t size;
    uint64_t dma_addr;
    uint64_t db_addr;
    uint64_t ei_addr;
    QEMUBH *bh;
    EventNotifier notifier;
    bool ioeventfd_enabled;
    QTAILQ_HEAD(, NvmeSQueue) sq_list;
    QTAILQ_HEAD(, NvmeRequest) req_list;
} NvmeCQueue;

#define TYPE_NVME "nvme"
#define NVME(obj) \
    OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)

typedef struct NvmeParams {
    char *serial;
    uint32_t num_queues; /* deprecated since 5.1 */
    uint32_t max_ioqpairs;
    uint16_t msix_qsize;
    uint16_t mqes;
    uint32_t cmb_size_mb;
    uint8_t aerl;
    uint32_t aer_max_queued;
    uint8_t mdts;
    uint8_t vsl;
    bool use_intel_id;
    uint8_t zasl;
    bool auto_transition_zones;
    bool legacy_cmb;
    bool ioeventfd;
    bool dbcs;
    uint16_t sriov_max_vfs;
    uint16_t sriov_vq_flexible;
    uint16_t sriov_vi_flexible;
    uint32_t sriov_max_vq_per_vf;
    uint32_t sriov_max_vi_per_vf;
    bool msix_exclusive_bar;
    bool ocp;

    struct {
        bool mem;
    } ctratt;

    uint16_t atomic_awun;
    uint16_t atomic_awupf;
    bool atomic_dn;
} NvmeParams;
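/*
 * Illustrative note on the phase tag: per the NVMe specification it starts
 * at 1 and is inverted each time the completion queue tail wraps, which is
 * how the host distinguishes new completion entries from stale ones. A
 * minimal sketch of the posting-side bookkeeping:
 *
 *     cq->tail++;
 *     if (cq->tail >= cq->size) {
 *         cq->tail = 0;
 *         cq->phase = !cq->phase;
 *     }
 */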
typedef struct NvmeCtrl {
    PCIDevice parent_obj;
    MemoryRegion bar0;
    MemoryRegion iomem;
    NvmeBar bar;
    NvmeParams params;
    NvmeBus bus;

    uint16_t cntlid;
    bool qs_created;
    uint32_t page_size;
    uint16_t page_bits;
    uint16_t max_prp_ents;
    uint32_t max_q_ents;
    uint8_t outstanding_aers;
    uint32_t irq_status;
    int cq_pending;
    uint64_t host_timestamp;              /* Timestamp sent by the host */
    uint64_t timestamp_set_qemu_clock_ms; /* QEMU clock time */
    uint64_t starttime_ms;
    uint16_t temperature;
    uint8_t smart_critical_warning;
    uint32_t conf_msix_qsize;
    uint32_t conf_ioqpairs;
    uint64_t dbbuf_dbs;
    uint64_t dbbuf_eis;
    bool dbbuf_enabled;

    struct {
        uint32_t acs[256];
        struct {
            uint32_t nvm[256];
            uint32_t zoned[256];
        } iocs;
    } cse;

    struct {
        MemoryRegion mem;
        uint8_t *buf;
        bool cmse;
        hwaddr cba;
    } cmb;

    struct {
        HostMemoryBackend *dev;
        bool cmse;
        hwaddr cba;
    } pmr;

    uint8_t aer_mask;
    NvmeRequest **aer_reqs;
    QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue;
    int aer_queued;

    uint32_t dmrsl;

    /* Namespace IDs start at 1, so the bitmap is sized 1-based */
#define NVME_CHANGED_NSID_SIZE (NVME_MAX_NAMESPACES + 1)
    DECLARE_BITMAP(changed_nsids, NVME_CHANGED_NSID_SIZE);

    NvmeSubsystem *subsys;

    NvmeNamespace namespace;
    NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeSQueue **sq;
    NvmeCQueue **cq;
    NvmeSQueue admin_sq;
    NvmeCQueue admin_cq;
    NvmeIdCtrl id_ctrl;

    struct {
        struct {
            uint16_t temp_thresh_hi;
            uint16_t temp_thresh_low;
        };

        uint32_t async_config;
        NvmeHostBehaviorSupport hbs;
    } features;

    NvmePriCtrlCap pri_ctrl_cap;
    uint32_t nr_sec_ctrls;
    NvmeSecCtrlEntry *sec_ctrl_list;
    struct {
        uint16_t vqrfap;
        uint16_t virfap;
    } next_pri_ctrl_cap; /* These override pri_ctrl_cap after reset */
    uint32_t dn;         /* Disable Normal */
    NvmeAtomic atomic;

    /* Socket mapping to SPDM over NVMe Security In/Out commands */
    int spdm_socket;
} NvmeCtrl;

typedef enum NvmeResetType {
    NVME_RESET_FUNCTION   = 0,
    NVME_RESET_CONTROLLER = 1,
} NvmeResetType;

static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid)
{
    if (!nsid || nsid > NVME_MAX_NAMESPACES) {
        return NULL;
    }

    return n->namespaces[nsid];
}

static inline NvmeCQueue *nvme_cq(NvmeRequest *req)
{
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;

    return n->cq[sq->cqid];
}

static inline NvmeCtrl *nvme_ctrl(NvmeRequest *req)
{
    NvmeSQueue *sq = req->sq;
    return sq->ctrl;
}

static inline uint16_t nvme_cid(NvmeRequest *req)
{
    if (!req) {
        return 0xffff;
    }

    return le16_to_cpu(req->cqe.cid);
}

static inline NvmeSecCtrlEntry *nvme_sctrl(NvmeCtrl *n)
{
    PCIDevice *pci_dev = &n->parent_obj;
    NvmeCtrl *pf = NVME(pcie_sriov_get_pf(pci_dev));

    if (pci_is_vf(pci_dev)) {
        return &pf->sec_ctrl_list[pcie_sriov_vf_number(pci_dev)];
    }

    return NULL;
}

static inline NvmeSecCtrlEntry *nvme_sctrl_for_cntlid(NvmeCtrl *n,
                                                      uint16_t cntlid)
{
    NvmeSecCtrlEntry *list = n->sec_ctrl_list;
    uint8_t i;

    for (i = 0; i < n->nr_sec_ctrls; i++) {
        if (le16_to_cpu(list[i].scid) == cntlid) {
            return &list[i];
        }
    }

    return NULL;
}

void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns);
uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len,
                          NvmeTxDirection dir, NvmeRequest *req);
uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len,
                           NvmeTxDirection dir, NvmeRequest *req);
void nvme_rw_complete_cb(void *opaque, int ret);
uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
                       NvmeCmd *cmd);

void nvme_atomic_configure_max_write_size(bool dn, uint16_t awun,
                                          uint16_t awupf, NvmeAtomic *atomic);
void nvme_ns_atomic_configure_boundary(bool dn, uint16_t nabsn,
                                       uint16_t nabspf, NvmeAtomic *atomic);

#endif /* HW_NVME_NVME_H */