/*
 * QEMU NVM Express
 *
 * Copyright (c) 2012 Intel Corporation
 * Copyright (c) 2021 Minwoo Im
 * Copyright (c) 2021 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *   Keith Busch <kbusch@kernel.org>
 *   Klaus Jensen <k.jensen@samsung.com>
 *   Gollu Appalanaidu <anaidu.gollu@samsung.com>
 *   Dmitry Fomichev <dmitry.fomichev@wdc.com>
 *   Minwoo Im <minwoo.im.dev@gmail.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

#ifndef HW_NVME_INTERNAL_H
#define HW_NVME_INTERNAL_H

#include "qemu/uuid.h"
#include "hw/pci/pci.h"
#include "hw/block/block.h"

#include "block/nvme.h"

#define NVME_MAX_CONTROLLERS 32
#define NVME_MAX_NAMESPACES  256
#define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000)

QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1);

typedef struct NvmeCtrl NvmeCtrl;
typedef struct NvmeNamespace NvmeNamespace;

#define TYPE_NVME_BUS "nvme-bus"
OBJECT_DECLARE_SIMPLE_TYPE(NvmeBus, NVME_BUS)

typedef struct NvmeBus {
    BusState parent_bus;
} NvmeBus;

#define TYPE_NVME_SUBSYS "nvme-subsys"
#define NVME_SUBSYS(obj) \
    OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)

typedef struct NvmeSubsystem {
    DeviceState parent_obj;
    NvmeBus bus;
    uint8_t subnqn[256];

    NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS];
    NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1];

    struct {
        char *nqn;
    } params;
} NvmeSubsystem;

int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n);

static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
                                         uint32_t cntlid)
{
    if (!subsys || cntlid >= NVME_MAX_CONTROLLERS) {
        return NULL;
    }

    return subsys->ctrls[cntlid];
}

static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys,
                                            uint32_t nsid)
{
    if (!subsys || !nsid || nsid > NVME_MAX_NAMESPACES) {
        return NULL;
    }

    return subsys->namespaces[nsid];
}
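/*
 * Illustrative sketch only (a hypothetical helper, not part of this
 * header's API): controller slots in a subsystem may be NULL for IDs
 * that were never registered, so enumeration has to probe every slot.
 * It relies solely on nvme_subsys_ctrl() above.
 */
static inline int nvme_subsys_num_ctrls_example(NvmeSubsystem *subsys)
{
    int count = 0;

    for (int cntlid = 0; cntlid < NVME_MAX_CONTROLLERS; cntlid++) {
        if (nvme_subsys_ctrl(subsys, cntlid)) {
            count++;
        }
    }

    return count;
}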
#define TYPE_NVME_NS "nvme-ns"
#define NVME_NS(obj) \
    OBJECT_CHECK(NvmeNamespace, (obj), TYPE_NVME_NS)

typedef struct NvmeZone {
    NvmeZoneDescr d;
    uint64_t w_ptr;
    QTAILQ_ENTRY(NvmeZone) entry;
} NvmeZone;

typedef struct NvmeNamespaceParams {
    bool detached;
    bool shared;
    uint32_t nsid;
    QemuUUID uuid;
    uint64_t eui64;
    bool eui64_default;

    uint16_t ms;
    uint8_t mset;
    uint8_t pi;
    uint8_t pil;

    uint16_t mssrl;
    uint32_t mcl;
    uint8_t msrc;

    bool zoned;
    bool cross_zone_read;
    uint64_t zone_size_bs;
    uint64_t zone_cap_bs;
    uint32_t max_active_zones;
    uint32_t max_open_zones;
    uint32_t zd_extension_size;

    uint32_t numzrwa;
    uint64_t zrwas;
    uint64_t zrwafg;
} NvmeNamespaceParams;

typedef struct NvmeNamespace {
    DeviceState parent_obj;
    BlockConf blkconf;
    int32_t bootindex;
    int64_t size;
    int64_t moff;
    NvmeIdNs id_ns;
    NvmeLBAF lbaf;
    size_t lbasz;
    const uint32_t *iocs;
    uint8_t csi;
    uint16_t status;
    int attached;

    struct {
        uint16_t zrwas;
        uint16_t zrwafg;
        uint32_t numzrwa;
    } zns;

    QTAILQ_ENTRY(NvmeNamespace) entry;

    NvmeIdNsZoned *id_ns_zoned;
    NvmeZone *zone_array;
    QTAILQ_HEAD(, NvmeZone) exp_open_zones;
    QTAILQ_HEAD(, NvmeZone) imp_open_zones;
    QTAILQ_HEAD(, NvmeZone) closed_zones;
    QTAILQ_HEAD(, NvmeZone) full_zones;
    uint32_t num_zones;
    uint64_t zone_size;
    uint64_t zone_capacity;
    uint32_t zone_size_log2;
    uint8_t *zd_extensions;
    int32_t nr_open_zones;
    int32_t nr_active_zones;

    NvmeNamespaceParams params;

    struct {
        uint32_t err_rec;
    } features;
} NvmeNamespace;

static inline uint32_t nvme_nsid(NvmeNamespace *ns)
{
    if (ns) {
        return ns->params.nsid;
    }

    return 0;
}

static inline size_t nvme_l2b(NvmeNamespace *ns, uint64_t lba)
{
    return lba << ns->lbaf.ds;
}

static inline size_t nvme_m2b(NvmeNamespace *ns, uint64_t lba)
{
    return ns->lbaf.ms * lba;
}

static inline int64_t nvme_moff(NvmeNamespace *ns, uint64_t lba)
{
    return ns->moff + nvme_m2b(ns, lba);
}

static inline bool nvme_ns_ext(NvmeNamespace *ns)
{
    return !!NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas);
}

static inline NvmeZoneState nvme_get_zone_state(NvmeZone *zone)
{
    return zone->d.zs >> 4;
}

static inline void nvme_set_zone_state(NvmeZone *zone, NvmeZoneState state)
{
    zone->d.zs = state << 4;
}

static inline uint64_t nvme_zone_rd_boundary(NvmeNamespace *ns, NvmeZone *zone)
{
    return zone->d.zslba + ns->zone_size;
}

static inline uint64_t nvme_zone_wr_boundary(NvmeZone *zone)
{
    return zone->d.zslba + zone->d.zcap;
}

static inline bool nvme_wp_is_valid(NvmeZone *zone)
{
    uint8_t st = nvme_get_zone_state(zone);

    return st != NVME_ZONE_STATE_FULL &&
           st != NVME_ZONE_STATE_READ_ONLY &&
           st != NVME_ZONE_STATE_OFFLINE;
}

static inline uint8_t *nvme_get_zd_extension(NvmeNamespace *ns,
                                             uint32_t zone_idx)
{
    return &ns->zd_extensions[zone_idx * ns->params.zd_extension_size];
}
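/*
 * Worked example (illustrative, not part of this header's API): with
 * 512-byte logical blocks (lbaf.ds == 9) and 16 bytes of metadata per
 * block (lbaf.ms == 16), nvme_l2b(ns, 8) == 4096 and nvme_m2b(ns, 8) ==
 * 128. The hypothetical helper below combines the write pointer with
 * nvme_zone_wr_boundary() to compute how many blocks can still be
 * written to a zone before it must transition to Full.
 */
static inline uint64_t nvme_zone_wr_remaining_example(NvmeZone *zone)
{
    uint64_t bound = nvme_zone_wr_boundary(zone);

    /* the write pointer is only meaningful in writable zone states */
    if (!nvme_wp_is_valid(zone) || zone->w_ptr >= bound) {
        return 0;
    }

    return bound - zone->w_ptr;
}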
static inline void nvme_aor_inc_open(NvmeNamespace *ns)
{
    assert(ns->nr_open_zones >= 0);
    if (ns->params.max_open_zones) {
        ns->nr_open_zones++;
        assert(ns->nr_open_zones <= ns->params.max_open_zones);
    }
}

static inline void nvme_aor_dec_open(NvmeNamespace *ns)
{
    if (ns->params.max_open_zones) {
        assert(ns->nr_open_zones > 0);
        ns->nr_open_zones--;
    }
    assert(ns->nr_open_zones >= 0);
}

static inline void nvme_aor_inc_active(NvmeNamespace *ns)
{
    assert(ns->nr_active_zones >= 0);
    if (ns->params.max_active_zones) {
        ns->nr_active_zones++;
        assert(ns->nr_active_zones <= ns->params.max_active_zones);
    }
}

static inline void nvme_aor_dec_active(NvmeNamespace *ns)
{
    if (ns->params.max_active_zones) {
        assert(ns->nr_active_zones > 0);
        ns->nr_active_zones--;
        assert(ns->nr_active_zones >= ns->nr_open_zones);
    }
    assert(ns->nr_active_zones >= 0);
}
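/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): the AOR counters maintain the invariant nr_open_zones <=
 * nr_active_zones, so a zone must take an active slot before it takes
 * an open one. The sketch below shows the call order a caller would be
 * expected to use when moving an Empty zone to Implicitly Open,
 * assuming NVME_ZONE_STATE_IMPLICITLY_OPEN from "block/nvme.h".
 */
static inline void nvme_zone_imp_open_example(NvmeNamespace *ns,
                                              NvmeZone *zone)
{
    /* reserve the resources first; both helpers assert their limits */
    nvme_aor_inc_active(ns);
    nvme_aor_inc_open(ns);

    nvme_set_zone_state(zone, NVME_ZONE_STATE_IMPLICITLY_OPEN);
    QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry);
}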
void nvme_ns_init_format(NvmeNamespace *ns);
int nvme_ns_setup(NvmeNamespace *ns, Error **errp);
void nvme_ns_drain(NvmeNamespace *ns);
void nvme_ns_shutdown(NvmeNamespace *ns);
void nvme_ns_cleanup(NvmeNamespace *ns);

typedef struct NvmeAsyncEvent {
    QTAILQ_ENTRY(NvmeAsyncEvent) entry;
    NvmeAerResult result;
} NvmeAsyncEvent;

enum {
    NVME_SG_ALLOC = 1 << 0,
    NVME_SG_DMA   = 1 << 1,
};

typedef struct NvmeSg {
    int flags;

    union {
        QEMUSGList qsg;
        QEMUIOVector iov;
    };
} NvmeSg;

typedef enum NvmeTxDirection {
    NVME_TX_DIRECTION_TO_DEVICE   = 0,
    NVME_TX_DIRECTION_FROM_DEVICE = 1,
} NvmeTxDirection;

typedef struct NvmeRequest {
    struct NvmeSQueue *sq;
    struct NvmeNamespace *ns;
    BlockAIOCB *aiocb;
    uint16_t status;
    void *opaque;
    NvmeCqe cqe;
    NvmeCmd cmd;
    BlockAcctCookie acct;
    NvmeSg sg;
    QTAILQ_ENTRY(NvmeRequest) entry;
} NvmeRequest;

typedef struct NvmeBounceContext {
    NvmeRequest *req;

    struct {
        QEMUIOVector iov;
        uint8_t *bounce;
    } data, mdata;
} NvmeBounceContext;

static inline const char *nvme_adm_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_ADM_CMD_DELETE_SQ:     return "NVME_ADM_CMD_DELETE_SQ";
    case NVME_ADM_CMD_CREATE_SQ:     return "NVME_ADM_CMD_CREATE_SQ";
    case NVME_ADM_CMD_GET_LOG_PAGE:  return "NVME_ADM_CMD_GET_LOG_PAGE";
    case NVME_ADM_CMD_DELETE_CQ:     return "NVME_ADM_CMD_DELETE_CQ";
    case NVME_ADM_CMD_CREATE_CQ:     return "NVME_ADM_CMD_CREATE_CQ";
    case NVME_ADM_CMD_IDENTIFY:      return "NVME_ADM_CMD_IDENTIFY";
    case NVME_ADM_CMD_ABORT:         return "NVME_ADM_CMD_ABORT";
    case NVME_ADM_CMD_SET_FEATURES:  return "NVME_ADM_CMD_SET_FEATURES";
    case NVME_ADM_CMD_GET_FEATURES:  return "NVME_ADM_CMD_GET_FEATURES";
    case NVME_ADM_CMD_ASYNC_EV_REQ:  return "NVME_ADM_CMD_ASYNC_EV_REQ";
    case NVME_ADM_CMD_NS_ATTACHMENT: return "NVME_ADM_CMD_NS_ATTACHMENT";
    case NVME_ADM_CMD_FORMAT_NVM:    return "NVME_ADM_CMD_FORMAT_NVM";
    default:                         return "NVME_ADM_CMD_UNKNOWN";
    }
}

static inline const char *nvme_io_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_CMD_FLUSH:          return "NVME_NVM_CMD_FLUSH";
    case NVME_CMD_WRITE:          return "NVME_NVM_CMD_WRITE";
    case NVME_CMD_READ:           return "NVME_NVM_CMD_READ";
    case NVME_CMD_COMPARE:        return "NVME_NVM_CMD_COMPARE";
    case NVME_CMD_WRITE_ZEROES:   return "NVME_NVM_CMD_WRITE_ZEROES";
    case NVME_CMD_DSM:            return "NVME_NVM_CMD_DSM";
    case NVME_CMD_VERIFY:         return "NVME_NVM_CMD_VERIFY";
    case NVME_CMD_COPY:           return "NVME_NVM_CMD_COPY";
    case NVME_CMD_ZONE_MGMT_SEND: return "NVME_ZONED_CMD_MGMT_SEND";
    case NVME_CMD_ZONE_MGMT_RECV: return "NVME_ZONED_CMD_MGMT_RECV";
    case NVME_CMD_ZONE_APPEND:    return "NVME_ZONED_CMD_ZONE_APPEND";
    default:                      return "NVME_NVM_CMD_UNKNOWN";
    }
}

typedef struct NvmeSQueue {
    struct NvmeCtrl *ctrl;
    uint16_t sqid;
    uint16_t cqid;
    uint32_t head;
    uint32_t tail;
    uint32_t size;
    uint64_t dma_addr;
    QEMUTimer *timer;
    NvmeRequest *io_req;
    QTAILQ_HEAD(, NvmeRequest) req_list;
    QTAILQ_HEAD(, NvmeRequest) out_req_list;
    QTAILQ_ENTRY(NvmeSQueue) entry;
} NvmeSQueue;

typedef struct NvmeCQueue {
    struct NvmeCtrl *ctrl;
    uint8_t phase;
    uint16_t cqid;
    uint16_t irq_enabled;
    uint32_t head;
    uint32_t tail;
    uint32_t vector;
    uint32_t size;
    uint64_t dma_addr;
    QEMUTimer *timer;
    QTAILQ_HEAD(, NvmeSQueue) sq_list;
    QTAILQ_HEAD(, NvmeRequest) req_list;
} NvmeCQueue;

#define TYPE_NVME "nvme"
#define NVME(obj) \
    OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)

typedef struct NvmeParams {
    char *serial;
    uint32_t num_queues; /* deprecated since 5.1 */
    uint32_t max_ioqpairs;
    uint16_t msix_qsize;
    uint32_t cmb_size_mb;
    uint8_t aerl;
    uint32_t aer_max_queued;
    uint8_t mdts;
    uint8_t vsl;
    bool use_intel_id;
    uint8_t zasl;
    bool auto_transition_zones;
    bool legacy_cmb;
} NvmeParams;

typedef struct NvmeCtrl {
    PCIDevice parent_obj;
    MemoryRegion bar0;
    MemoryRegion iomem;
    NvmeBar bar;
    NvmeParams params;
    NvmeBus bus;

    uint16_t cntlid;
    bool qs_created;
    uint32_t page_size;
    uint16_t page_bits;
    uint16_t max_prp_ents;
    uint16_t cqe_size;
    uint16_t sqe_size;
    uint32_t reg_size;
    uint32_t max_q_ents;
    uint8_t outstanding_aers;
    uint32_t irq_status;
    int cq_pending;
    uint64_t host_timestamp;              /* Timestamp sent by the host */
    uint64_t timestamp_set_qemu_clock_ms; /* QEMU clock time */
    uint64_t starttime_ms;
    uint16_t temperature;
    uint8_t smart_critical_warning;

    struct {
        MemoryRegion mem;
        uint8_t *buf;
        bool cmse;
        hwaddr cba;
    } cmb;

    struct {
        HostMemoryBackend *dev;
        bool cmse;
        hwaddr cba;
    } pmr;

    uint8_t aer_mask;
    NvmeRequest **aer_reqs;
    QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue;
    int aer_queued;

    uint32_t dmrsl;

    /* Namespace IDs start at 1, so the bitmap needs one extra slot */
#define NVME_CHANGED_NSID_SIZE (NVME_MAX_NAMESPACES + 1)
    DECLARE_BITMAP(changed_nsids, NVME_CHANGED_NSID_SIZE);

    NvmeSubsystem *subsys;

    NvmeNamespace namespace;
    NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeSQueue **sq;
    NvmeCQueue **cq;
    NvmeSQueue admin_sq;
    NvmeCQueue admin_cq;
    NvmeIdCtrl id_ctrl;

    struct {
        struct {
            uint16_t temp_thresh_hi;
            uint16_t temp_thresh_low;
        };
        uint32_t async_config;
    } features;
} NvmeCtrl;

static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid)
{
    if (!nsid || nsid > NVME_MAX_NAMESPACES) {
        return NULL;
    }

    return n->namespaces[nsid];
}

static inline NvmeCQueue *nvme_cq(NvmeRequest *req)
{
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;

    return n->cq[sq->cqid];
}

static inline NvmeCtrl *nvme_ctrl(NvmeRequest *req)
{
    NvmeSQueue *sq = req->sq;
    return sq->ctrl;
}

static inline uint16_t nvme_cid(NvmeRequest *req)
{
    if (!req) {
        return 0xffff;
    }

    return le16_to_cpu(req->cqe.cid);
}

void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns);
uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len,
                          NvmeTxDirection dir, NvmeRequest *req);
uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len,
                           NvmeTxDirection dir, NvmeRequest *req);
void nvme_rw_complete_cb(void *opaque, int ret);
uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
                       NvmeCmd *cmd);

/* from Linux kernel (crypto/crct10dif_common.c) */
static const uint16_t t10_dif_crc_table[256] = {
    0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
    0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
    0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
    0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
    0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
    0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
    0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
    0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
    0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
    0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
    0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
    0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
    0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
    0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
    0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
    0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
    0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
    0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
    0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
    0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
    0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
    0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
    0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
    0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
    0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
    0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
    0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
    0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
    0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
    0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
    0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
    0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
};
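/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): the table above drives the byte-wise T10-DIF CRC16 (polynomial
 * 0x8BB7), updated exactly as in the kernel's crc_t10dif_generic()
 * referenced above. The Guard field of a protection information tuple
 * is this CRC computed over the logical block data.
 */
static inline uint16_t nvme_crc16_t10dif_example(uint16_t crc,
                                                 const uint8_t *buf,
                                                 size_t len)
{
    for (size_t i = 0; i < len; i++) {
        /* fold one byte into the running CRC via the lookup table */
        crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buf[i]) & 0xff];
    }

    return crc;
}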
uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint8_t prinfo, uint64_t slba,
                           uint32_t reftag);
uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen,
                               uint64_t slba);
void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len,
                                 uint8_t *mbuf, size_t mlen, uint16_t apptag,
                                 uint32_t *reftag);
uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len,
                        uint8_t *mbuf, size_t mlen, uint8_t prinfo,
                        uint64_t slba, uint16_t apptag,
                        uint16_t appmask, uint32_t *reftag);
uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req);

#endif /* HW_NVME_INTERNAL_H */