/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *
 *
 * Notes on coding style
 * ---------------------
 * While QEMU coding style prefers lowercase hexadecimals in constants, the
 * NVMe subsystem uses the format from the NVMe specifications in comments
 * (i.e. 'h' suffix instead of '0x' prefix).
 *
 * Usage
 * -----
 * See docs/system/nvme.rst for extensive documentation.
 *
 * Add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
 *      -device nvme,serial=<serial>,id=<bus_name>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
 *              mdts=<N[optional]>,vsl=<N[optional]>, \
 *              zoned.zasl=<N[optional]>, \
 *              zoned.auto_transition=<on|off[optional]>, \
 *              sriov_max_vfs=<N[optional]> \
 *              sriov_vq_flexible=<N[optional]> \
 *              sriov_vi_flexible=<N[optional]> \
 *              sriov_max_vi_per_vf=<N[optional]> \
 *              sriov_max_vq_per_vf=<N[optional]> \
 *              subsys=<subsys_id>
 *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
 *              zoned=<true|false[optional]>, \
 *              subsys=<subsys_id>,detached=<true|false[optional]>
 *
 * Note cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed to
 * be at offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By
 * default, the device will use the "v1.4 CMB scheme" - use the `legacy-cmb`
 * parameter to always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
 *
 * Enabling PMR emulation can be achieved by pointing `pmrdev` to a
 * memory-backend-file. For example:
 *      -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *       size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * The PMR will use BAR 4/5 exclusively.
 *
 * To place controller(s) and namespace(s) in a subsystem, provide an
 * nvme-subsys device as above.
 *
 * nvme subsystem device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `nqn`
 *   This parameter provides the `<nqn_id>` part of the string
 *   `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
 *   of subsystem controllers. Note that `<nqn_id>` should be unique per
 *   subsystem, but this is not enforced by QEMU. If not specified, it will
 *   default to the value of the `id` parameter (`<subsys_id>`).
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `subsys`
 *   Specifying this parameter attaches the controller to the subsystem and
 *   the SUBNQN field in the controller will report the NQN of the subsystem
 *   device. This also enables multi controller capability represented in
 *   Identify Controller data structure in CMIC (Controller Multi-path I/O and
 *   Namespace Sharing Capabilities).
 *
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 *
 * - `mdts`
 *   Indicates the maximum data transfer size for a command that transfers data
 *   between host-accessible memory and the controller. The value is specified
 *   as a power of two (2^n) and is in units of the minimum memory page size
 *   (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
 *
 * - `vsl`
 *   Indicates the maximum data size limit for the Verify command. Like `mdts`,
 *   this value is specified as a power of two (2^n) and is in units of the
 *   minimum memory page size (CAP.MPSMIN). The default value is 7 (i.e. 512
 *   KiB).
 *
 * - `zoned.zasl`
 *   Indicates the maximum data transfer size for the Zone Append command. Like
 *   `mdts`, the value is specified as a power of two (2^n) and is in units of
 *   the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
 *   defaulting to the value of `mdts`).
 *
 * - `zoned.auto_transition`
 *   Indicates if zones in zone state implicitly opened can be automatically
 *   transitioned to zone state closed for resource management purposes.
 *   Defaults to 'on'.
 *
 * - `sriov_max_vfs`
 *   Indicates the maximum number of PCIe virtual functions supported
 *   by the controller. The default value is 0. Specifying a non-zero value
 *   enables reporting of both SR-IOV and ARI capabilities by the NVMe device.
 *   Virtual function controllers will not report SR-IOV capability.
 *
 *   NOTE: Single Root I/O Virtualization support is experimental.
 *   All the related parameters may be subject to change.
 *
 * - `sriov_vq_flexible`
 *   Indicates the total number of flexible queue resources assignable to all
 *   the secondary controllers. Implicitly sets the number of primary
 *   controller's private resources to `(max_ioqpairs - sriov_vq_flexible)`.
 *
 * - `sriov_vi_flexible`
 *   Indicates the total number of flexible interrupt resources assignable to
 *   all the secondary controllers. Implicitly sets the number of primary
 *   controller's private resources to `(msix_qsize - sriov_vi_flexible)`.
 *
 * - `sriov_max_vi_per_vf`
 *   Indicates the maximum number of virtual interrupt resources assignable
 *   to a secondary controller. The default 0 resolves to
 *   `(sriov_vi_flexible / sriov_max_vfs)`.
 *
 * - `sriov_max_vq_per_vf`
 *   Indicates the maximum number of virtual queue resources assignable to
 *   a secondary controller. The default 0 resolves to
 *   `(sriov_vq_flexible / sriov_max_vfs)`.
 *
 * nvme namespace device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `shared`
 *   When the parent nvme device (as defined explicitly by the 'bus' parameter
 *   or implicitly by the most recently defined NvmeBus) is linked to an
 *   nvme-subsys device, the namespace will be attached to all controllers in
 *   the subsystem. If set to 'off' (the default), the namespace will remain a
 *   private namespace and may only be attached to a single controller at a
 *   time.
 *
 * - `detached`
 *   This parameter is only valid together with the `subsys` parameter. If left
 *   at the default value (`false/off`), the namespace will be attached to all
 *   controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
 *   namespace will be available in the subsystem but not attached to any
 *   controllers.
 *
 * Setting `zoned` to true selects the Zoned Command Set for the namespace.
161 * In this case, the following namespace properties are available to configure 162 * zoned operation: 163 * zoned.zone_size=<zone size in bytes, default: 128MiB> 164 * The number may be followed by K, M, G as in kilo-, mega- or giga-. 165 * 166 * zoned.zone_capacity=<zone capacity in bytes, default: zone size> 167 * The value 0 (default) forces zone capacity to be the same as zone 168 * size. The value of this property may not exceed zone size. 169 * 170 * zoned.descr_ext_size=<zone descriptor extension size, default 0> 171 * This value needs to be specified in 64B units. If it is zero, 172 * namespace(s) will not support zone descriptor extensions. 173 * 174 * zoned.max_active=<Maximum Active Resources (zones), default: 0> 175 * The default value means there is no limit to the number of 176 * concurrently active zones. 177 * 178 * zoned.max_open=<Maximum Open Resources (zones), default: 0> 179 * The default value means there is no limit to the number of 180 * concurrently open zones. 181 * 182 * zoned.cross_read=<enable RAZB, default: false> 183 * Setting this property to true enables Read Across Zone Boundaries. 184 */ 185 186 #include "qemu/osdep.h" 187 #include "qemu/cutils.h" 188 #include "qemu/error-report.h" 189 #include "qemu/log.h" 190 #include "qemu/units.h" 191 #include "qemu/range.h" 192 #include "qapi/error.h" 193 #include "qapi/visitor.h" 194 #include "sysemu/sysemu.h" 195 #include "sysemu/block-backend.h" 196 #include "sysemu/hostmem.h" 197 #include "hw/pci/msix.h" 198 #include "hw/pci/pcie_sriov.h" 199 #include "migration/vmstate.h" 200 201 #include "nvme.h" 202 #include "dif.h" 203 #include "trace.h" 204 205 #define NVME_MAX_IOQPAIRS 0xffff 206 #define NVME_DB_SIZE 4 207 #define NVME_SPEC_VER 0x00010400 208 #define NVME_CMB_BIR 2 209 #define NVME_PMR_BIR 4 210 #define NVME_TEMPERATURE 0x143 211 #define NVME_TEMPERATURE_WARNING 0x157 212 #define NVME_TEMPERATURE_CRITICAL 0x175 213 #define NVME_NUM_FW_SLOTS 1 214 #define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB) 215 #define NVME_MAX_VFS 127 216 #define NVME_VF_RES_GRANULARITY 1 217 #define NVME_VF_OFFSET 0x1 218 #define NVME_VF_STRIDE 1 219 220 #define NVME_GUEST_ERR(trace, fmt, ...) 
\ 221 do { \ 222 (trace_##trace)(__VA_ARGS__); \ 223 qemu_log_mask(LOG_GUEST_ERROR, #trace \ 224 " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \ 225 } while (0) 226 227 static const bool nvme_feature_support[NVME_FID_MAX] = { 228 [NVME_ARBITRATION] = true, 229 [NVME_POWER_MANAGEMENT] = true, 230 [NVME_TEMPERATURE_THRESHOLD] = true, 231 [NVME_ERROR_RECOVERY] = true, 232 [NVME_VOLATILE_WRITE_CACHE] = true, 233 [NVME_NUMBER_OF_QUEUES] = true, 234 [NVME_INTERRUPT_COALESCING] = true, 235 [NVME_INTERRUPT_VECTOR_CONF] = true, 236 [NVME_WRITE_ATOMICITY] = true, 237 [NVME_ASYNCHRONOUS_EVENT_CONF] = true, 238 [NVME_TIMESTAMP] = true, 239 [NVME_HOST_BEHAVIOR_SUPPORT] = true, 240 [NVME_COMMAND_SET_PROFILE] = true, 241 }; 242 243 static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { 244 [NVME_TEMPERATURE_THRESHOLD] = NVME_FEAT_CAP_CHANGE, 245 [NVME_ERROR_RECOVERY] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS, 246 [NVME_VOLATILE_WRITE_CACHE] = NVME_FEAT_CAP_CHANGE, 247 [NVME_NUMBER_OF_QUEUES] = NVME_FEAT_CAP_CHANGE, 248 [NVME_ASYNCHRONOUS_EVENT_CONF] = NVME_FEAT_CAP_CHANGE, 249 [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE, 250 [NVME_HOST_BEHAVIOR_SUPPORT] = NVME_FEAT_CAP_CHANGE, 251 [NVME_COMMAND_SET_PROFILE] = NVME_FEAT_CAP_CHANGE, 252 }; 253 254 static const uint32_t nvme_cse_acs[256] = { 255 [NVME_ADM_CMD_DELETE_SQ] = NVME_CMD_EFF_CSUPP, 256 [NVME_ADM_CMD_CREATE_SQ] = NVME_CMD_EFF_CSUPP, 257 [NVME_ADM_CMD_GET_LOG_PAGE] = NVME_CMD_EFF_CSUPP, 258 [NVME_ADM_CMD_DELETE_CQ] = NVME_CMD_EFF_CSUPP, 259 [NVME_ADM_CMD_CREATE_CQ] = NVME_CMD_EFF_CSUPP, 260 [NVME_ADM_CMD_IDENTIFY] = NVME_CMD_EFF_CSUPP, 261 [NVME_ADM_CMD_ABORT] = NVME_CMD_EFF_CSUPP, 262 [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP, 263 [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP, 264 [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP, 265 [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC, 266 [NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP, 267 [NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP, 268 [NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 269 }; 270 271 static const uint32_t nvme_cse_iocs_none[256]; 272 273 static const uint32_t nvme_cse_iocs_nvm[256] = { 274 [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 275 [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 276 [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 277 [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP, 278 [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 279 [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, 280 [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 281 [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, 282 }; 283 284 static const uint32_t nvme_cse_iocs_zoned[256] = { 285 [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 286 [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 287 [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 288 [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP, 289 [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 290 [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, 291 [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 292 [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, 293 [NVME_CMD_ZONE_APPEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 294 [NVME_CMD_ZONE_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 295 [NVME_CMD_ZONE_MGMT_RECV] = NVME_CMD_EFF_CSUPP, 296 }; 297 298 static void nvme_process_sq(void *opaque); 299 static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst); 300 301 static uint16_t nvme_sqid(NvmeRequest *req) 302 { 303 return 
le16_to_cpu(req->sq->sqid); 304 } 305 306 static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone, 307 NvmeZoneState state) 308 { 309 if (QTAILQ_IN_USE(zone, entry)) { 310 switch (nvme_get_zone_state(zone)) { 311 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 312 QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry); 313 break; 314 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 315 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); 316 break; 317 case NVME_ZONE_STATE_CLOSED: 318 QTAILQ_REMOVE(&ns->closed_zones, zone, entry); 319 break; 320 case NVME_ZONE_STATE_FULL: 321 QTAILQ_REMOVE(&ns->full_zones, zone, entry); 322 default: 323 ; 324 } 325 } 326 327 nvme_set_zone_state(zone, state); 328 329 switch (state) { 330 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 331 QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry); 332 break; 333 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 334 QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry); 335 break; 336 case NVME_ZONE_STATE_CLOSED: 337 QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry); 338 break; 339 case NVME_ZONE_STATE_FULL: 340 QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry); 341 case NVME_ZONE_STATE_READ_ONLY: 342 break; 343 default: 344 zone->d.za = 0; 345 } 346 } 347 348 static uint16_t nvme_zns_check_resources(NvmeNamespace *ns, uint32_t act, 349 uint32_t opn, uint32_t zrwa) 350 { 351 if (ns->params.max_active_zones != 0 && 352 ns->nr_active_zones + act > ns->params.max_active_zones) { 353 trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones); 354 return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR; 355 } 356 357 if (ns->params.max_open_zones != 0 && 358 ns->nr_open_zones + opn > ns->params.max_open_zones) { 359 trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones); 360 return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR; 361 } 362 363 if (zrwa > ns->zns.numzrwa) { 364 return NVME_NOZRWA | NVME_DNR; 365 } 366 367 return NVME_SUCCESS; 368 } 369 370 /* 371 * Check if we can open a zone without exceeding open/active limits. 372 * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5). 373 */ 374 static uint16_t nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn) 375 { 376 return nvme_zns_check_resources(ns, act, opn, 0); 377 } 378 379 static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr) 380 { 381 hwaddr hi, lo; 382 383 if (!n->cmb.cmse) { 384 return false; 385 } 386 387 lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; 388 hi = lo + int128_get64(n->cmb.mem.size); 389 390 return addr >= lo && addr < hi; 391 } 392 393 static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr) 394 { 395 hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; 396 return &n->cmb.buf[addr - base]; 397 } 398 399 static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr) 400 { 401 hwaddr hi; 402 403 if (!n->pmr.cmse) { 404 return false; 405 } 406 407 hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size); 408 409 return addr >= n->pmr.cba && addr < hi; 410 } 411 412 static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr) 413 { 414 return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba); 415 } 416 417 static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr) 418 { 419 hwaddr hi, lo; 420 421 /* 422 * The purpose of this check is to guard against invalid "local" access to 423 * the iomem (i.e. controller registers). Thus, we check against the range 424 * covered by the 'bar0' MemoryRegion since that is currently composed of 425 * two subregions (the NVMe "MBAR" and the MSI-X table/pba). 
Note, however, 426 * that if the device model is ever changed to allow the CMB to be located 427 * in BAR0 as well, then this must be changed. 428 */ 429 lo = n->bar0.addr; 430 hi = lo + int128_get64(n->bar0.size); 431 432 return addr >= lo && addr < hi; 433 } 434 435 static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) 436 { 437 hwaddr hi = addr + size - 1; 438 if (hi < addr) { 439 return 1; 440 } 441 442 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { 443 memcpy(buf, nvme_addr_to_cmb(n, addr), size); 444 return 0; 445 } 446 447 if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { 448 memcpy(buf, nvme_addr_to_pmr(n, addr), size); 449 return 0; 450 } 451 452 return pci_dma_read(&n->parent_obj, addr, buf, size); 453 } 454 455 static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size) 456 { 457 hwaddr hi = addr + size - 1; 458 if (hi < addr) { 459 return 1; 460 } 461 462 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { 463 memcpy(nvme_addr_to_cmb(n, addr), buf, size); 464 return 0; 465 } 466 467 if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { 468 memcpy(nvme_addr_to_pmr(n, addr), buf, size); 469 return 0; 470 } 471 472 return pci_dma_write(&n->parent_obj, addr, buf, size); 473 } 474 475 static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid) 476 { 477 return nsid && 478 (nsid == NVME_NSID_BROADCAST || nsid <= NVME_MAX_NAMESPACES); 479 } 480 481 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid) 482 { 483 return sqid < n->conf_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1; 484 } 485 486 static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid) 487 { 488 return cqid < n->conf_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1; 489 } 490 491 static void nvme_inc_cq_tail(NvmeCQueue *cq) 492 { 493 cq->tail++; 494 if (cq->tail >= cq->size) { 495 cq->tail = 0; 496 cq->phase = !cq->phase; 497 } 498 } 499 500 static void nvme_inc_sq_head(NvmeSQueue *sq) 501 { 502 sq->head = (sq->head + 1) % sq->size; 503 } 504 505 static uint8_t nvme_cq_full(NvmeCQueue *cq) 506 { 507 return (cq->tail + 1) % cq->size == cq->head; 508 } 509 510 static uint8_t nvme_sq_empty(NvmeSQueue *sq) 511 { 512 return sq->head == sq->tail; 513 } 514 515 static void nvme_irq_check(NvmeCtrl *n) 516 { 517 uint32_t intms = ldl_le_p(&n->bar.intms); 518 519 if (msix_enabled(&(n->parent_obj))) { 520 return; 521 } 522 if (~intms & n->irq_status) { 523 pci_irq_assert(&n->parent_obj); 524 } else { 525 pci_irq_deassert(&n->parent_obj); 526 } 527 } 528 529 static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq) 530 { 531 if (cq->irq_enabled) { 532 if (msix_enabled(&(n->parent_obj))) { 533 trace_pci_nvme_irq_msix(cq->vector); 534 msix_notify(&(n->parent_obj), cq->vector); 535 } else { 536 trace_pci_nvme_irq_pin(); 537 assert(cq->vector < 32); 538 n->irq_status |= 1 << cq->vector; 539 nvme_irq_check(n); 540 } 541 } else { 542 trace_pci_nvme_irq_masked(); 543 } 544 } 545 546 static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq) 547 { 548 if (cq->irq_enabled) { 549 if (msix_enabled(&(n->parent_obj))) { 550 return; 551 } else { 552 assert(cq->vector < 32); 553 if (!n->cq_pending) { 554 n->irq_status &= ~(1 << cq->vector); 555 } 556 nvme_irq_check(n); 557 } 558 } 559 } 560 561 static void nvme_req_clear(NvmeRequest *req) 562 { 563 req->ns = NULL; 564 req->opaque = NULL; 565 req->aiocb = NULL; 566 memset(&req->cqe, 0x0, sizeof(req->cqe)); 567 req->status = NVME_SUCCESS; 568 } 569 570 static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg 
*sg, bool dma) 571 { 572 if (dma) { 573 pci_dma_sglist_init(&sg->qsg, &n->parent_obj, 0); 574 sg->flags = NVME_SG_DMA; 575 } else { 576 qemu_iovec_init(&sg->iov, 0); 577 } 578 579 sg->flags |= NVME_SG_ALLOC; 580 } 581 582 static inline void nvme_sg_unmap(NvmeSg *sg) 583 { 584 if (!(sg->flags & NVME_SG_ALLOC)) { 585 return; 586 } 587 588 if (sg->flags & NVME_SG_DMA) { 589 qemu_sglist_destroy(&sg->qsg); 590 } else { 591 qemu_iovec_destroy(&sg->iov); 592 } 593 594 memset(sg, 0x0, sizeof(*sg)); 595 } 596 597 /* 598 * When metadata is transfered as extended LBAs, the DPTR mapped into `sg` 599 * holds both data and metadata. This function splits the data and metadata 600 * into two separate QSG/IOVs. 601 */ 602 static void nvme_sg_split(NvmeSg *sg, NvmeNamespace *ns, NvmeSg *data, 603 NvmeSg *mdata) 604 { 605 NvmeSg *dst = data; 606 uint32_t trans_len, count = ns->lbasz; 607 uint64_t offset = 0; 608 bool dma = sg->flags & NVME_SG_DMA; 609 size_t sge_len; 610 size_t sg_len = dma ? sg->qsg.size : sg->iov.size; 611 int sg_idx = 0; 612 613 assert(sg->flags & NVME_SG_ALLOC); 614 615 while (sg_len) { 616 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; 617 618 trans_len = MIN(sg_len, count); 619 trans_len = MIN(trans_len, sge_len - offset); 620 621 if (dst) { 622 if (dma) { 623 qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset, 624 trans_len); 625 } else { 626 qemu_iovec_add(&dst->iov, 627 sg->iov.iov[sg_idx].iov_base + offset, 628 trans_len); 629 } 630 } 631 632 sg_len -= trans_len; 633 count -= trans_len; 634 offset += trans_len; 635 636 if (count == 0) { 637 dst = (dst == data) ? mdata : data; 638 count = (dst == data) ? ns->lbasz : ns->lbaf.ms; 639 } 640 641 if (sge_len == offset) { 642 offset = 0; 643 sg_idx++; 644 } 645 } 646 } 647 648 static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr, 649 size_t len) 650 { 651 if (!len) { 652 return NVME_SUCCESS; 653 } 654 655 trace_pci_nvme_map_addr_cmb(addr, len); 656 657 if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) { 658 return NVME_DATA_TRAS_ERROR; 659 } 660 661 qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len); 662 663 return NVME_SUCCESS; 664 } 665 666 static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr, 667 size_t len) 668 { 669 if (!len) { 670 return NVME_SUCCESS; 671 } 672 673 if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) { 674 return NVME_DATA_TRAS_ERROR; 675 } 676 677 qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len); 678 679 return NVME_SUCCESS; 680 } 681 682 static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len) 683 { 684 bool cmb = false, pmr = false; 685 686 if (!len) { 687 return NVME_SUCCESS; 688 } 689 690 trace_pci_nvme_map_addr(addr, len); 691 692 if (nvme_addr_is_iomem(n, addr)) { 693 return NVME_DATA_TRAS_ERROR; 694 } 695 696 if (nvme_addr_is_cmb(n, addr)) { 697 cmb = true; 698 } else if (nvme_addr_is_pmr(n, addr)) { 699 pmr = true; 700 } 701 702 if (cmb || pmr) { 703 if (sg->flags & NVME_SG_DMA) { 704 return NVME_INVALID_USE_OF_CMB | NVME_DNR; 705 } 706 707 if (sg->iov.niov + 1 > IOV_MAX) { 708 goto max_mappings_exceeded; 709 } 710 711 if (cmb) { 712 return nvme_map_addr_cmb(n, &sg->iov, addr, len); 713 } else { 714 return nvme_map_addr_pmr(n, &sg->iov, addr, len); 715 } 716 } 717 718 if (!(sg->flags & NVME_SG_DMA)) { 719 return NVME_INVALID_USE_OF_CMB | NVME_DNR; 720 } 721 722 if (sg->qsg.nsg + 1 > IOV_MAX) { 723 goto max_mappings_exceeded; 724 } 725 726 
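    /*
     * Neither CMB nor PMR: the address targets regular host memory, so the
     * range is simply appended to the DMA scatter/gather list.
     */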
qemu_sglist_add(&sg->qsg, addr, len); 727 728 return NVME_SUCCESS; 729 730 max_mappings_exceeded: 731 NVME_GUEST_ERR(pci_nvme_ub_too_many_mappings, 732 "number of mappings exceed 1024"); 733 return NVME_INTERNAL_DEV_ERROR | NVME_DNR; 734 } 735 736 static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr) 737 { 738 return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr)); 739 } 740 741 static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1, 742 uint64_t prp2, uint32_t len) 743 { 744 hwaddr trans_len = n->page_size - (prp1 % n->page_size); 745 trans_len = MIN(len, trans_len); 746 int num_prps = (len >> n->page_bits) + 1; 747 uint16_t status; 748 int ret; 749 750 trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps); 751 752 nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1)); 753 754 status = nvme_map_addr(n, sg, prp1, trans_len); 755 if (status) { 756 goto unmap; 757 } 758 759 len -= trans_len; 760 if (len) { 761 if (len > n->page_size) { 762 uint64_t prp_list[n->max_prp_ents]; 763 uint32_t nents, prp_trans; 764 int i = 0; 765 766 /* 767 * The first PRP list entry, pointed to by PRP2 may contain offset. 768 * Hence, we need to calculate the number of entries in based on 769 * that offset. 770 */ 771 nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3; 772 prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t); 773 ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans); 774 if (ret) { 775 trace_pci_nvme_err_addr_read(prp2); 776 status = NVME_DATA_TRAS_ERROR; 777 goto unmap; 778 } 779 while (len != 0) { 780 uint64_t prp_ent = le64_to_cpu(prp_list[i]); 781 782 if (i == nents - 1 && len > n->page_size) { 783 if (unlikely(prp_ent & (n->page_size - 1))) { 784 trace_pci_nvme_err_invalid_prplist_ent(prp_ent); 785 status = NVME_INVALID_PRP_OFFSET | NVME_DNR; 786 goto unmap; 787 } 788 789 i = 0; 790 nents = (len + n->page_size - 1) >> n->page_bits; 791 nents = MIN(nents, n->max_prp_ents); 792 prp_trans = nents * sizeof(uint64_t); 793 ret = nvme_addr_read(n, prp_ent, (void *)prp_list, 794 prp_trans); 795 if (ret) { 796 trace_pci_nvme_err_addr_read(prp_ent); 797 status = NVME_DATA_TRAS_ERROR; 798 goto unmap; 799 } 800 prp_ent = le64_to_cpu(prp_list[i]); 801 } 802 803 if (unlikely(prp_ent & (n->page_size - 1))) { 804 trace_pci_nvme_err_invalid_prplist_ent(prp_ent); 805 status = NVME_INVALID_PRP_OFFSET | NVME_DNR; 806 goto unmap; 807 } 808 809 trans_len = MIN(len, n->page_size); 810 status = nvme_map_addr(n, sg, prp_ent, trans_len); 811 if (status) { 812 goto unmap; 813 } 814 815 len -= trans_len; 816 i++; 817 } 818 } else { 819 if (unlikely(prp2 & (n->page_size - 1))) { 820 trace_pci_nvme_err_invalid_prp2_align(prp2); 821 status = NVME_INVALID_PRP_OFFSET | NVME_DNR; 822 goto unmap; 823 } 824 status = nvme_map_addr(n, sg, prp2, len); 825 if (status) { 826 goto unmap; 827 } 828 } 829 } 830 831 return NVME_SUCCESS; 832 833 unmap: 834 nvme_sg_unmap(sg); 835 return status; 836 } 837 838 /* 839 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the 840 * number of bytes mapped in len. 
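 *
 * For example, given *len = 8192 and two Data Block descriptors of 4096
 * bytes each, both blocks are mapped and *len ends up at zero; any residual
 * left in *len after the whole SGL has been walked means the SGL described
 * less data than the command requires (numbers purely illustrative).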
841 */ 842 static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg, 843 NvmeSglDescriptor *segment, uint64_t nsgld, 844 size_t *len, NvmeCmd *cmd) 845 { 846 dma_addr_t addr, trans_len; 847 uint32_t dlen; 848 uint16_t status; 849 850 for (int i = 0; i < nsgld; i++) { 851 uint8_t type = NVME_SGL_TYPE(segment[i].type); 852 853 switch (type) { 854 case NVME_SGL_DESCR_TYPE_DATA_BLOCK: 855 break; 856 case NVME_SGL_DESCR_TYPE_SEGMENT: 857 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT: 858 return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR; 859 default: 860 return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR; 861 } 862 863 dlen = le32_to_cpu(segment[i].len); 864 865 if (!dlen) { 866 continue; 867 } 868 869 if (*len == 0) { 870 /* 871 * All data has been mapped, but the SGL contains additional 872 * segments and/or descriptors. The controller might accept 873 * ignoring the rest of the SGL. 874 */ 875 uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls); 876 if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) { 877 break; 878 } 879 880 trace_pci_nvme_err_invalid_sgl_excess_length(dlen); 881 return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; 882 } 883 884 trans_len = MIN(*len, dlen); 885 886 addr = le64_to_cpu(segment[i].addr); 887 888 if (UINT64_MAX - addr < dlen) { 889 return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; 890 } 891 892 status = nvme_map_addr(n, sg, addr, trans_len); 893 if (status) { 894 return status; 895 } 896 897 *len -= trans_len; 898 } 899 900 return NVME_SUCCESS; 901 } 902 903 static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl, 904 size_t len, NvmeCmd *cmd) 905 { 906 /* 907 * Read the segment in chunks of 256 descriptors (one 4k page) to avoid 908 * dynamically allocating a potentially huge SGL. The spec allows the SGL 909 * to be larger (as in number of bytes required to describe the SGL 910 * descriptors and segment chain) than the command transfer size, so it is 911 * not bounded by MDTS. 912 */ 913 const int SEG_CHUNK_SIZE = 256; 914 915 NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld; 916 uint64_t nsgld; 917 uint32_t seg_len; 918 uint16_t status; 919 hwaddr addr; 920 int ret; 921 922 sgld = &sgl; 923 addr = le64_to_cpu(sgl.addr); 924 925 trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl.type), len); 926 927 nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr)); 928 929 /* 930 * If the entire transfer can be described with a single data block it can 931 * be mapped directly. 
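 * In that case SGL1 in the command is itself a Data Block descriptor rather
 * than a (Last) Segment descriptor, so no segment needs to be read from
 * host memory.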
932 */ 933 if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) { 934 status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd); 935 if (status) { 936 goto unmap; 937 } 938 939 goto out; 940 } 941 942 for (;;) { 943 switch (NVME_SGL_TYPE(sgld->type)) { 944 case NVME_SGL_DESCR_TYPE_SEGMENT: 945 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT: 946 break; 947 default: 948 return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; 949 } 950 951 seg_len = le32_to_cpu(sgld->len); 952 953 /* check the length of the (Last) Segment descriptor */ 954 if (!seg_len || seg_len & 0xf) { 955 return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; 956 } 957 958 if (UINT64_MAX - addr < seg_len) { 959 return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; 960 } 961 962 nsgld = seg_len / sizeof(NvmeSglDescriptor); 963 964 while (nsgld > SEG_CHUNK_SIZE) { 965 if (nvme_addr_read(n, addr, segment, sizeof(segment))) { 966 trace_pci_nvme_err_addr_read(addr); 967 status = NVME_DATA_TRAS_ERROR; 968 goto unmap; 969 } 970 971 status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE, 972 &len, cmd); 973 if (status) { 974 goto unmap; 975 } 976 977 nsgld -= SEG_CHUNK_SIZE; 978 addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor); 979 } 980 981 ret = nvme_addr_read(n, addr, segment, nsgld * 982 sizeof(NvmeSglDescriptor)); 983 if (ret) { 984 trace_pci_nvme_err_addr_read(addr); 985 status = NVME_DATA_TRAS_ERROR; 986 goto unmap; 987 } 988 989 last_sgld = &segment[nsgld - 1]; 990 991 /* 992 * If the segment ends with a Data Block, then we are done. 993 */ 994 if (NVME_SGL_TYPE(last_sgld->type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) { 995 status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd); 996 if (status) { 997 goto unmap; 998 } 999 1000 goto out; 1001 } 1002 1003 /* 1004 * If the last descriptor was not a Data Block, then the current 1005 * segment must not be a Last Segment. 1006 */ 1007 if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) { 1008 status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; 1009 goto unmap; 1010 } 1011 1012 sgld = last_sgld; 1013 addr = le64_to_cpu(sgld->addr); 1014 1015 /* 1016 * Do not map the last descriptor; it will be a Segment or Last Segment 1017 * descriptor and is handled by the next iteration. 
1018 */ 1019 status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd); 1020 if (status) { 1021 goto unmap; 1022 } 1023 } 1024 1025 out: 1026 /* if there is any residual left in len, the SGL was too short */ 1027 if (len) { 1028 status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR; 1029 goto unmap; 1030 } 1031 1032 return NVME_SUCCESS; 1033 1034 unmap: 1035 nvme_sg_unmap(sg); 1036 return status; 1037 } 1038 1039 uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len, 1040 NvmeCmd *cmd) 1041 { 1042 uint64_t prp1, prp2; 1043 1044 switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) { 1045 case NVME_PSDT_PRP: 1046 prp1 = le64_to_cpu(cmd->dptr.prp1); 1047 prp2 = le64_to_cpu(cmd->dptr.prp2); 1048 1049 return nvme_map_prp(n, sg, prp1, prp2, len); 1050 case NVME_PSDT_SGL_MPTR_CONTIGUOUS: 1051 case NVME_PSDT_SGL_MPTR_SGL: 1052 return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd); 1053 default: 1054 return NVME_INVALID_FIELD; 1055 } 1056 } 1057 1058 static uint16_t nvme_map_mptr(NvmeCtrl *n, NvmeSg *sg, size_t len, 1059 NvmeCmd *cmd) 1060 { 1061 int psdt = NVME_CMD_FLAGS_PSDT(cmd->flags); 1062 hwaddr mptr = le64_to_cpu(cmd->mptr); 1063 uint16_t status; 1064 1065 if (psdt == NVME_PSDT_SGL_MPTR_SGL) { 1066 NvmeSglDescriptor sgl; 1067 1068 if (nvme_addr_read(n, mptr, &sgl, sizeof(sgl))) { 1069 return NVME_DATA_TRAS_ERROR; 1070 } 1071 1072 status = nvme_map_sgl(n, sg, sgl, len, cmd); 1073 if (status && (status & 0x7ff) == NVME_DATA_SGL_LEN_INVALID) { 1074 status = NVME_MD_SGL_LEN_INVALID | NVME_DNR; 1075 } 1076 1077 return status; 1078 } 1079 1080 nvme_sg_init(n, sg, nvme_addr_is_dma(n, mptr)); 1081 status = nvme_map_addr(n, sg, mptr, len); 1082 if (status) { 1083 nvme_sg_unmap(sg); 1084 } 1085 1086 return status; 1087 } 1088 1089 static uint16_t nvme_map_data(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) 1090 { 1091 NvmeNamespace *ns = req->ns; 1092 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 1093 bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); 1094 bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); 1095 size_t len = nvme_l2b(ns, nlb); 1096 uint16_t status; 1097 1098 if (nvme_ns_ext(ns) && 1099 !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { 1100 NvmeSg sg; 1101 1102 len += nvme_m2b(ns, nlb); 1103 1104 status = nvme_map_dptr(n, &sg, len, &req->cmd); 1105 if (status) { 1106 return status; 1107 } 1108 1109 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); 1110 nvme_sg_split(&sg, ns, &req->sg, NULL); 1111 nvme_sg_unmap(&sg); 1112 1113 return NVME_SUCCESS; 1114 } 1115 1116 return nvme_map_dptr(n, &req->sg, len, &req->cmd); 1117 } 1118 1119 static uint16_t nvme_map_mdata(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) 1120 { 1121 NvmeNamespace *ns = req->ns; 1122 size_t len = nvme_m2b(ns, nlb); 1123 uint16_t status; 1124 1125 if (nvme_ns_ext(ns)) { 1126 NvmeSg sg; 1127 1128 len += nvme_l2b(ns, nlb); 1129 1130 status = nvme_map_dptr(n, &sg, len, &req->cmd); 1131 if (status) { 1132 return status; 1133 } 1134 1135 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); 1136 nvme_sg_split(&sg, ns, NULL, &req->sg); 1137 nvme_sg_unmap(&sg); 1138 1139 return NVME_SUCCESS; 1140 } 1141 1142 return nvme_map_mptr(n, &req->sg, len, &req->cmd); 1143 } 1144 1145 static uint16_t nvme_tx_interleaved(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, 1146 uint32_t len, uint32_t bytes, 1147 int32_t skip_bytes, int64_t offset, 1148 NvmeTxDirection dir) 1149 { 1150 hwaddr addr; 1151 uint32_t trans_len, count = bytes; 1152 bool dma = sg->flags & NVME_SG_DMA; 1153 int64_t sge_len; 1154 int sg_idx = 0; 1155 int ret; 1156 1157 
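    /*
     * Walk the scatter/gather list transferring `bytes` bytes at a time and
     * then skipping `skip_bytes` bytes, starting `offset` bytes into the
     * list. This is what picks data and metadata apart (or weaves them back
     * together) for namespaces formatted with extended LBAs; e.g. with
     * lbasz = 4096 and ms = 64, each extended LBA occupies 4160 bytes with
     * the metadata immediately following the data (sizes illustrative).
     */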
assert(sg->flags & NVME_SG_ALLOC); 1158 1159 while (len) { 1160 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; 1161 1162 if (sge_len - offset < 0) { 1163 offset -= sge_len; 1164 sg_idx++; 1165 continue; 1166 } 1167 1168 if (sge_len == offset) { 1169 offset = 0; 1170 sg_idx++; 1171 continue; 1172 } 1173 1174 trans_len = MIN(len, count); 1175 trans_len = MIN(trans_len, sge_len - offset); 1176 1177 if (dma) { 1178 addr = sg->qsg.sg[sg_idx].base + offset; 1179 } else { 1180 addr = (hwaddr)(uintptr_t)sg->iov.iov[sg_idx].iov_base + offset; 1181 } 1182 1183 if (dir == NVME_TX_DIRECTION_TO_DEVICE) { 1184 ret = nvme_addr_read(n, addr, ptr, trans_len); 1185 } else { 1186 ret = nvme_addr_write(n, addr, ptr, trans_len); 1187 } 1188 1189 if (ret) { 1190 return NVME_DATA_TRAS_ERROR; 1191 } 1192 1193 ptr += trans_len; 1194 len -= trans_len; 1195 count -= trans_len; 1196 offset += trans_len; 1197 1198 if (count == 0) { 1199 count = bytes; 1200 offset += skip_bytes; 1201 } 1202 } 1203 1204 return NVME_SUCCESS; 1205 } 1206 1207 static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, void *ptr, uint32_t len, 1208 NvmeTxDirection dir) 1209 { 1210 assert(sg->flags & NVME_SG_ALLOC); 1211 1212 if (sg->flags & NVME_SG_DMA) { 1213 const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; 1214 dma_addr_t residual; 1215 1216 if (dir == NVME_TX_DIRECTION_TO_DEVICE) { 1217 dma_buf_write(ptr, len, &residual, &sg->qsg, attrs); 1218 } else { 1219 dma_buf_read(ptr, len, &residual, &sg->qsg, attrs); 1220 } 1221 1222 if (unlikely(residual)) { 1223 trace_pci_nvme_err_invalid_dma(); 1224 return NVME_INVALID_FIELD | NVME_DNR; 1225 } 1226 } else { 1227 size_t bytes; 1228 1229 if (dir == NVME_TX_DIRECTION_TO_DEVICE) { 1230 bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len); 1231 } else { 1232 bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len); 1233 } 1234 1235 if (unlikely(bytes != len)) { 1236 trace_pci_nvme_err_invalid_dma(); 1237 return NVME_INVALID_FIELD | NVME_DNR; 1238 } 1239 } 1240 1241 return NVME_SUCCESS; 1242 } 1243 1244 static inline uint16_t nvme_c2h(NvmeCtrl *n, void *ptr, uint32_t len, 1245 NvmeRequest *req) 1246 { 1247 uint16_t status; 1248 1249 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); 1250 if (status) { 1251 return status; 1252 } 1253 1254 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE); 1255 } 1256 1257 static inline uint16_t nvme_h2c(NvmeCtrl *n, void *ptr, uint32_t len, 1258 NvmeRequest *req) 1259 { 1260 uint16_t status; 1261 1262 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); 1263 if (status) { 1264 return status; 1265 } 1266 1267 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE); 1268 } 1269 1270 uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len, 1271 NvmeTxDirection dir, NvmeRequest *req) 1272 { 1273 NvmeNamespace *ns = req->ns; 1274 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 1275 bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); 1276 bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); 1277 1278 if (nvme_ns_ext(ns) && 1279 !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { 1280 return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbasz, 1281 ns->lbaf.ms, 0, dir); 1282 } 1283 1284 return nvme_tx(n, &req->sg, ptr, len, dir); 1285 } 1286 1287 uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len, 1288 NvmeTxDirection dir, NvmeRequest *req) 1289 { 1290 NvmeNamespace *ns = req->ns; 1291 uint16_t status; 1292 1293 if (nvme_ns_ext(ns)) { 1294 return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbaf.ms, 1295 
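                                   /* skip lbasz data bytes before and
                                      between the lbaf.ms-byte metadata
                                      chunks (skip_bytes and offset args) */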
ns->lbasz, ns->lbasz, dir); 1296 } 1297 1298 nvme_sg_unmap(&req->sg); 1299 1300 status = nvme_map_mptr(n, &req->sg, len, &req->cmd); 1301 if (status) { 1302 return status; 1303 } 1304 1305 return nvme_tx(n, &req->sg, ptr, len, dir); 1306 } 1307 1308 static inline void nvme_blk_read(BlockBackend *blk, int64_t offset, 1309 BlockCompletionFunc *cb, NvmeRequest *req) 1310 { 1311 assert(req->sg.flags & NVME_SG_ALLOC); 1312 1313 if (req->sg.flags & NVME_SG_DMA) { 1314 req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, 1315 cb, req); 1316 } else { 1317 req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req); 1318 } 1319 } 1320 1321 static inline void nvme_blk_write(BlockBackend *blk, int64_t offset, 1322 BlockCompletionFunc *cb, NvmeRequest *req) 1323 { 1324 assert(req->sg.flags & NVME_SG_ALLOC); 1325 1326 if (req->sg.flags & NVME_SG_DMA) { 1327 req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, 1328 cb, req); 1329 } else { 1330 req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req); 1331 } 1332 } 1333 1334 static void nvme_update_cq_head(NvmeCQueue *cq) 1335 { 1336 pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr, &cq->head, 1337 sizeof(cq->head)); 1338 trace_pci_nvme_shadow_doorbell_cq(cq->cqid, cq->head); 1339 } 1340 1341 static void nvme_post_cqes(void *opaque) 1342 { 1343 NvmeCQueue *cq = opaque; 1344 NvmeCtrl *n = cq->ctrl; 1345 NvmeRequest *req, *next; 1346 bool pending = cq->head != cq->tail; 1347 int ret; 1348 1349 QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { 1350 NvmeSQueue *sq; 1351 hwaddr addr; 1352 1353 if (n->dbbuf_enabled) { 1354 nvme_update_cq_head(cq); 1355 } 1356 1357 if (nvme_cq_full(cq)) { 1358 break; 1359 } 1360 1361 sq = req->sq; 1362 req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase); 1363 req->cqe.sq_id = cpu_to_le16(sq->sqid); 1364 req->cqe.sq_head = cpu_to_le16(sq->head); 1365 addr = cq->dma_addr + cq->tail * n->cqe_size; 1366 ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe, 1367 sizeof(req->cqe)); 1368 if (ret) { 1369 trace_pci_nvme_err_addr_write(addr); 1370 trace_pci_nvme_err_cfs(); 1371 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); 1372 break; 1373 } 1374 QTAILQ_REMOVE(&cq->req_list, req, entry); 1375 nvme_inc_cq_tail(cq); 1376 nvme_sg_unmap(&req->sg); 1377 QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); 1378 } 1379 if (cq->tail != cq->head) { 1380 if (cq->irq_enabled && !pending) { 1381 n->cq_pending++; 1382 } 1383 1384 nvme_irq_assert(n, cq); 1385 } 1386 } 1387 1388 static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req) 1389 { 1390 assert(cq->cqid == req->sq->cqid); 1391 trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid, 1392 le32_to_cpu(req->cqe.result), 1393 le32_to_cpu(req->cqe.dw1), 1394 req->status); 1395 1396 if (req->status) { 1397 trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns), 1398 req->status, req->cmd.opcode); 1399 } 1400 1401 QTAILQ_REMOVE(&req->sq->out_req_list, req, entry); 1402 QTAILQ_INSERT_TAIL(&cq->req_list, req, entry); 1403 1404 if (req->sq->ioeventfd_enabled) { 1405 /* Post CQE directly since we are in main loop thread */ 1406 nvme_post_cqes(cq); 1407 } else { 1408 /* Schedule the timer to post CQE later since we are in vcpu thread */ 1409 timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); 1410 } 1411 } 1412 1413 static void nvme_process_aers(void *opaque) 1414 { 1415 NvmeCtrl *n = opaque; 1416 NvmeAsyncEvent *event, *next; 1417 1418 trace_pci_nvme_process_aers(n->aer_queued); 1419 1420 
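    /*
     * Match queued events against outstanding AER commands: each unmasked
     * event consumes one AER slot and is posted as a completion on the
     * admin completion queue.
     */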
QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) { 1421 NvmeRequest *req; 1422 NvmeAerResult *result; 1423 1424 /* can't post cqe if there is nothing to complete */ 1425 if (!n->outstanding_aers) { 1426 trace_pci_nvme_no_outstanding_aers(); 1427 break; 1428 } 1429 1430 /* ignore if masked (cqe posted, but event not cleared) */ 1431 if (n->aer_mask & (1 << event->result.event_type)) { 1432 trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask); 1433 continue; 1434 } 1435 1436 QTAILQ_REMOVE(&n->aer_queue, event, entry); 1437 n->aer_queued--; 1438 1439 n->aer_mask |= 1 << event->result.event_type; 1440 n->outstanding_aers--; 1441 1442 req = n->aer_reqs[n->outstanding_aers]; 1443 1444 result = (NvmeAerResult *) &req->cqe.result; 1445 result->event_type = event->result.event_type; 1446 result->event_info = event->result.event_info; 1447 result->log_page = event->result.log_page; 1448 g_free(event); 1449 1450 trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info, 1451 result->log_page); 1452 1453 nvme_enqueue_req_completion(&n->admin_cq, req); 1454 } 1455 } 1456 1457 static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type, 1458 uint8_t event_info, uint8_t log_page) 1459 { 1460 NvmeAsyncEvent *event; 1461 1462 trace_pci_nvme_enqueue_event(event_type, event_info, log_page); 1463 1464 if (n->aer_queued == n->params.aer_max_queued) { 1465 trace_pci_nvme_enqueue_event_noqueue(n->aer_queued); 1466 return; 1467 } 1468 1469 event = g_new(NvmeAsyncEvent, 1); 1470 event->result = (NvmeAerResult) { 1471 .event_type = event_type, 1472 .event_info = event_info, 1473 .log_page = log_page, 1474 }; 1475 1476 QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry); 1477 n->aer_queued++; 1478 1479 nvme_process_aers(n); 1480 } 1481 1482 static void nvme_smart_event(NvmeCtrl *n, uint8_t event) 1483 { 1484 uint8_t aer_info; 1485 1486 /* Ref SPEC <Asynchronous Event Information 0x2013 SMART / Health Status> */ 1487 if (!(NVME_AEC_SMART(n->features.async_config) & event)) { 1488 return; 1489 } 1490 1491 switch (event) { 1492 case NVME_SMART_SPARE: 1493 aer_info = NVME_AER_INFO_SMART_SPARE_THRESH; 1494 break; 1495 case NVME_SMART_TEMPERATURE: 1496 aer_info = NVME_AER_INFO_SMART_TEMP_THRESH; 1497 break; 1498 case NVME_SMART_RELIABILITY: 1499 case NVME_SMART_MEDIA_READ_ONLY: 1500 case NVME_SMART_FAILED_VOLATILE_MEDIA: 1501 case NVME_SMART_PMR_UNRELIABLE: 1502 aer_info = NVME_AER_INFO_SMART_RELIABILITY; 1503 break; 1504 default: 1505 return; 1506 } 1507 1508 nvme_enqueue_event(n, NVME_AER_TYPE_SMART, aer_info, NVME_LOG_SMART_INFO); 1509 } 1510 1511 static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type) 1512 { 1513 n->aer_mask &= ~(1 << event_type); 1514 if (!QTAILQ_EMPTY(&n->aer_queue)) { 1515 nvme_process_aers(n); 1516 } 1517 } 1518 1519 static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len) 1520 { 1521 uint8_t mdts = n->params.mdts; 1522 1523 if (mdts && len > n->page_size << mdts) { 1524 trace_pci_nvme_err_mdts(len); 1525 return NVME_INVALID_FIELD | NVME_DNR; 1526 } 1527 1528 return NVME_SUCCESS; 1529 } 1530 1531 static inline uint16_t nvme_check_bounds(NvmeNamespace *ns, uint64_t slba, 1532 uint32_t nlb) 1533 { 1534 uint64_t nsze = le64_to_cpu(ns->id_ns.nsze); 1535 1536 if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) { 1537 trace_pci_nvme_err_invalid_lba_range(slba, nlb, nsze); 1538 return NVME_LBA_RANGE | NVME_DNR; 1539 } 1540 1541 return NVME_SUCCESS; 1542 } 1543 1544 static int nvme_block_status_all(NvmeNamespace *ns, uint64_t slba, 1545 uint32_t nlb, int 
flags) 1546 { 1547 BlockDriverState *bs = blk_bs(ns->blkconf.blk); 1548 1549 int64_t pnum = 0, bytes = nvme_l2b(ns, nlb); 1550 int64_t offset = nvme_l2b(ns, slba); 1551 int ret; 1552 1553 /* 1554 * `pnum` holds the number of bytes after offset that shares the same 1555 * allocation status as the byte at offset. If `pnum` is different from 1556 * `bytes`, we should check the allocation status of the next range and 1557 * continue this until all bytes have been checked. 1558 */ 1559 do { 1560 bytes -= pnum; 1561 1562 ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL); 1563 if (ret < 0) { 1564 return ret; 1565 } 1566 1567 1568 trace_pci_nvme_block_status(offset, bytes, pnum, ret, 1569 !!(ret & BDRV_BLOCK_ZERO)); 1570 1571 if (!(ret & flags)) { 1572 return 1; 1573 } 1574 1575 offset += pnum; 1576 } while (pnum != bytes); 1577 1578 return 0; 1579 } 1580 1581 static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba, 1582 uint32_t nlb) 1583 { 1584 int ret; 1585 Error *err = NULL; 1586 1587 ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_DATA); 1588 if (ret) { 1589 if (ret < 0) { 1590 error_setg_errno(&err, -ret, "unable to get block status"); 1591 error_report_err(err); 1592 1593 return NVME_INTERNAL_DEV_ERROR; 1594 } 1595 1596 return NVME_DULB; 1597 } 1598 1599 return NVME_SUCCESS; 1600 } 1601 1602 static void nvme_aio_err(NvmeRequest *req, int ret) 1603 { 1604 uint16_t status = NVME_SUCCESS; 1605 Error *local_err = NULL; 1606 1607 switch (req->cmd.opcode) { 1608 case NVME_CMD_READ: 1609 status = NVME_UNRECOVERED_READ; 1610 break; 1611 case NVME_CMD_FLUSH: 1612 case NVME_CMD_WRITE: 1613 case NVME_CMD_WRITE_ZEROES: 1614 case NVME_CMD_ZONE_APPEND: 1615 status = NVME_WRITE_FAULT; 1616 break; 1617 default: 1618 status = NVME_INTERNAL_DEV_ERROR; 1619 break; 1620 } 1621 1622 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status); 1623 1624 error_setg_errno(&local_err, -ret, "aio failed"); 1625 error_report_err(local_err); 1626 1627 /* 1628 * Set the command status code to the first encountered error but allow a 1629 * subsequent Internal Device Error to trump it. 1630 */ 1631 if (req->status && status != NVME_INTERNAL_DEV_ERROR) { 1632 return; 1633 } 1634 1635 req->status = status; 1636 } 1637 1638 static inline uint32_t nvme_zone_idx(NvmeNamespace *ns, uint64_t slba) 1639 { 1640 return ns->zone_size_log2 > 0 ? 
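           /* a non-zero zone_size_log2 means the zone size is a power of
              two, so the index can be computed with a shift instead of a
              division */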
slba >> ns->zone_size_log2 : 1641 slba / ns->zone_size; 1642 } 1643 1644 static inline NvmeZone *nvme_get_zone_by_slba(NvmeNamespace *ns, uint64_t slba) 1645 { 1646 uint32_t zone_idx = nvme_zone_idx(ns, slba); 1647 1648 if (zone_idx >= ns->num_zones) { 1649 return NULL; 1650 } 1651 1652 return &ns->zone_array[zone_idx]; 1653 } 1654 1655 static uint16_t nvme_check_zone_state_for_write(NvmeZone *zone) 1656 { 1657 uint64_t zslba = zone->d.zslba; 1658 1659 switch (nvme_get_zone_state(zone)) { 1660 case NVME_ZONE_STATE_EMPTY: 1661 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1662 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1663 case NVME_ZONE_STATE_CLOSED: 1664 return NVME_SUCCESS; 1665 case NVME_ZONE_STATE_FULL: 1666 trace_pci_nvme_err_zone_is_full(zslba); 1667 return NVME_ZONE_FULL; 1668 case NVME_ZONE_STATE_OFFLINE: 1669 trace_pci_nvme_err_zone_is_offline(zslba); 1670 return NVME_ZONE_OFFLINE; 1671 case NVME_ZONE_STATE_READ_ONLY: 1672 trace_pci_nvme_err_zone_is_read_only(zslba); 1673 return NVME_ZONE_READ_ONLY; 1674 default: 1675 assert(false); 1676 } 1677 1678 return NVME_INTERNAL_DEV_ERROR; 1679 } 1680 1681 static uint16_t nvme_check_zone_write(NvmeNamespace *ns, NvmeZone *zone, 1682 uint64_t slba, uint32_t nlb) 1683 { 1684 uint64_t zcap = nvme_zone_wr_boundary(zone); 1685 uint16_t status; 1686 1687 status = nvme_check_zone_state_for_write(zone); 1688 if (status) { 1689 return status; 1690 } 1691 1692 if (zone->d.za & NVME_ZA_ZRWA_VALID) { 1693 uint64_t ezrwa = zone->w_ptr + 2 * ns->zns.zrwas; 1694 1695 if (slba < zone->w_ptr || slba + nlb > ezrwa) { 1696 trace_pci_nvme_err_zone_invalid_write(slba, zone->w_ptr); 1697 return NVME_ZONE_INVALID_WRITE; 1698 } 1699 } else { 1700 if (unlikely(slba != zone->w_ptr)) { 1701 trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, 1702 zone->w_ptr); 1703 return NVME_ZONE_INVALID_WRITE; 1704 } 1705 } 1706 1707 if (unlikely((slba + nlb) > zcap)) { 1708 trace_pci_nvme_err_zone_boundary(slba, nlb, zcap); 1709 return NVME_ZONE_BOUNDARY_ERROR; 1710 } 1711 1712 return NVME_SUCCESS; 1713 } 1714 1715 static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone) 1716 { 1717 switch (nvme_get_zone_state(zone)) { 1718 case NVME_ZONE_STATE_EMPTY: 1719 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1720 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1721 case NVME_ZONE_STATE_FULL: 1722 case NVME_ZONE_STATE_CLOSED: 1723 case NVME_ZONE_STATE_READ_ONLY: 1724 return NVME_SUCCESS; 1725 case NVME_ZONE_STATE_OFFLINE: 1726 trace_pci_nvme_err_zone_is_offline(zone->d.zslba); 1727 return NVME_ZONE_OFFLINE; 1728 default: 1729 assert(false); 1730 } 1731 1732 return NVME_INTERNAL_DEV_ERROR; 1733 } 1734 1735 static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba, 1736 uint32_t nlb) 1737 { 1738 NvmeZone *zone; 1739 uint64_t bndry, end; 1740 uint16_t status; 1741 1742 zone = nvme_get_zone_by_slba(ns, slba); 1743 assert(zone); 1744 1745 bndry = nvme_zone_rd_boundary(ns, zone); 1746 end = slba + nlb; 1747 1748 status = nvme_check_zone_state_for_read(zone); 1749 if (status) { 1750 ; 1751 } else if (unlikely(end > bndry)) { 1752 if (!ns->params.cross_zone_read) { 1753 status = NVME_ZONE_BOUNDARY_ERROR; 1754 } else { 1755 /* 1756 * Read across zone boundary - check that all subsequent 1757 * zones that are being read have an appropriate state. 
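 * This path is only taken when zoned.cross_read (RAZB) is enabled; an
 * offline zone anywhere in the range still fails the read.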
1758 */ 1759 do { 1760 zone++; 1761 status = nvme_check_zone_state_for_read(zone); 1762 if (status) { 1763 break; 1764 } 1765 } while (end > nvme_zone_rd_boundary(ns, zone)); 1766 } 1767 } 1768 1769 return status; 1770 } 1771 1772 static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone) 1773 { 1774 switch (nvme_get_zone_state(zone)) { 1775 case NVME_ZONE_STATE_FULL: 1776 return NVME_SUCCESS; 1777 1778 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1779 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1780 nvme_aor_dec_open(ns); 1781 /* fallthrough */ 1782 case NVME_ZONE_STATE_CLOSED: 1783 nvme_aor_dec_active(ns); 1784 1785 if (zone->d.za & NVME_ZA_ZRWA_VALID) { 1786 zone->d.za &= ~NVME_ZA_ZRWA_VALID; 1787 if (ns->params.numzrwa) { 1788 ns->zns.numzrwa++; 1789 } 1790 } 1791 1792 /* fallthrough */ 1793 case NVME_ZONE_STATE_EMPTY: 1794 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL); 1795 return NVME_SUCCESS; 1796 1797 default: 1798 return NVME_ZONE_INVAL_TRANSITION; 1799 } 1800 } 1801 1802 static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone) 1803 { 1804 switch (nvme_get_zone_state(zone)) { 1805 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1806 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1807 nvme_aor_dec_open(ns); 1808 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED); 1809 /* fall through */ 1810 case NVME_ZONE_STATE_CLOSED: 1811 return NVME_SUCCESS; 1812 1813 default: 1814 return NVME_ZONE_INVAL_TRANSITION; 1815 } 1816 } 1817 1818 static uint16_t nvme_zrm_reset(NvmeNamespace *ns, NvmeZone *zone) 1819 { 1820 switch (nvme_get_zone_state(zone)) { 1821 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1822 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1823 nvme_aor_dec_open(ns); 1824 /* fallthrough */ 1825 case NVME_ZONE_STATE_CLOSED: 1826 nvme_aor_dec_active(ns); 1827 1828 if (zone->d.za & NVME_ZA_ZRWA_VALID) { 1829 if (ns->params.numzrwa) { 1830 ns->zns.numzrwa++; 1831 } 1832 } 1833 1834 /* fallthrough */ 1835 case NVME_ZONE_STATE_FULL: 1836 zone->w_ptr = zone->d.zslba; 1837 zone->d.wp = zone->w_ptr; 1838 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EMPTY); 1839 /* fallthrough */ 1840 case NVME_ZONE_STATE_EMPTY: 1841 return NVME_SUCCESS; 1842 1843 default: 1844 return NVME_ZONE_INVAL_TRANSITION; 1845 } 1846 } 1847 1848 static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns) 1849 { 1850 NvmeZone *zone; 1851 1852 if (ns->params.max_open_zones && 1853 ns->nr_open_zones == ns->params.max_open_zones) { 1854 zone = QTAILQ_FIRST(&ns->imp_open_zones); 1855 if (zone) { 1856 /* 1857 * Automatically close this implicitly open zone. 1858 */ 1859 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); 1860 nvme_zrm_close(ns, zone); 1861 } 1862 } 1863 } 1864 1865 enum { 1866 NVME_ZRM_AUTO = 1 << 0, 1867 NVME_ZRM_ZRWA = 1 << 1, 1868 }; 1869 1870 static uint16_t nvme_zrm_open_flags(NvmeCtrl *n, NvmeNamespace *ns, 1871 NvmeZone *zone, int flags) 1872 { 1873 int act = 0; 1874 uint16_t status; 1875 1876 switch (nvme_get_zone_state(zone)) { 1877 case NVME_ZONE_STATE_EMPTY: 1878 act = 1; 1879 1880 /* fallthrough */ 1881 1882 case NVME_ZONE_STATE_CLOSED: 1883 if (n->params.auto_transition_zones) { 1884 nvme_zrm_auto_transition_zone(ns); 1885 } 1886 status = nvme_zns_check_resources(ns, act, 1, 1887 (flags & NVME_ZRM_ZRWA) ? 
1 : 0); 1888 if (status) { 1889 return status; 1890 } 1891 1892 if (act) { 1893 nvme_aor_inc_active(ns); 1894 } 1895 1896 nvme_aor_inc_open(ns); 1897 1898 if (flags & NVME_ZRM_AUTO) { 1899 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN); 1900 return NVME_SUCCESS; 1901 } 1902 1903 /* fallthrough */ 1904 1905 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1906 if (flags & NVME_ZRM_AUTO) { 1907 return NVME_SUCCESS; 1908 } 1909 1910 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN); 1911 1912 /* fallthrough */ 1913 1914 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1915 if (flags & NVME_ZRM_ZRWA) { 1916 ns->zns.numzrwa--; 1917 1918 zone->d.za |= NVME_ZA_ZRWA_VALID; 1919 } 1920 1921 return NVME_SUCCESS; 1922 1923 default: 1924 return NVME_ZONE_INVAL_TRANSITION; 1925 } 1926 } 1927 1928 static inline uint16_t nvme_zrm_auto(NvmeCtrl *n, NvmeNamespace *ns, 1929 NvmeZone *zone) 1930 { 1931 return nvme_zrm_open_flags(n, ns, zone, NVME_ZRM_AUTO); 1932 } 1933 1934 static void nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone, 1935 uint32_t nlb) 1936 { 1937 zone->d.wp += nlb; 1938 1939 if (zone->d.wp == nvme_zone_wr_boundary(zone)) { 1940 nvme_zrm_finish(ns, zone); 1941 } 1942 } 1943 1944 static void nvme_zoned_zrwa_implicit_flush(NvmeNamespace *ns, NvmeZone *zone, 1945 uint32_t nlbc) 1946 { 1947 uint16_t nzrwafgs = DIV_ROUND_UP(nlbc, ns->zns.zrwafg); 1948 1949 nlbc = nzrwafgs * ns->zns.zrwafg; 1950 1951 trace_pci_nvme_zoned_zrwa_implicit_flush(zone->d.zslba, nlbc); 1952 1953 zone->w_ptr += nlbc; 1954 1955 nvme_advance_zone_wp(ns, zone, nlbc); 1956 } 1957 1958 static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req) 1959 { 1960 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 1961 NvmeZone *zone; 1962 uint64_t slba; 1963 uint32_t nlb; 1964 1965 slba = le64_to_cpu(rw->slba); 1966 nlb = le16_to_cpu(rw->nlb) + 1; 1967 zone = nvme_get_zone_by_slba(ns, slba); 1968 assert(zone); 1969 1970 if (zone->d.za & NVME_ZA_ZRWA_VALID) { 1971 uint64_t ezrwa = zone->w_ptr + ns->zns.zrwas - 1; 1972 uint64_t elba = slba + nlb - 1; 1973 1974 if (elba > ezrwa) { 1975 nvme_zoned_zrwa_implicit_flush(ns, zone, elba - ezrwa); 1976 } 1977 1978 return; 1979 } 1980 1981 nvme_advance_zone_wp(ns, zone, nlb); 1982 } 1983 1984 static inline bool nvme_is_write(NvmeRequest *req) 1985 { 1986 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 1987 1988 return rw->opcode == NVME_CMD_WRITE || 1989 rw->opcode == NVME_CMD_ZONE_APPEND || 1990 rw->opcode == NVME_CMD_WRITE_ZEROES; 1991 } 1992 1993 static AioContext *nvme_get_aio_context(BlockAIOCB *acb) 1994 { 1995 return qemu_get_aio_context(); 1996 } 1997 1998 static void nvme_misc_cb(void *opaque, int ret) 1999 { 2000 NvmeRequest *req = opaque; 2001 2002 trace_pci_nvme_misc_cb(nvme_cid(req)); 2003 2004 if (ret) { 2005 nvme_aio_err(req, ret); 2006 } 2007 2008 nvme_enqueue_req_completion(nvme_cq(req), req); 2009 } 2010 2011 void nvme_rw_complete_cb(void *opaque, int ret) 2012 { 2013 NvmeRequest *req = opaque; 2014 NvmeNamespace *ns = req->ns; 2015 BlockBackend *blk = ns->blkconf.blk; 2016 BlockAcctCookie *acct = &req->acct; 2017 BlockAcctStats *stats = blk_get_stats(blk); 2018 2019 trace_pci_nvme_rw_complete_cb(nvme_cid(req), blk_name(blk)); 2020 2021 if (ret) { 2022 block_acct_failed(stats, acct); 2023 nvme_aio_err(req, ret); 2024 } else { 2025 block_acct_done(stats, acct); 2026 } 2027 2028 if (ns->params.zoned && nvme_is_write(req)) { 2029 nvme_finalize_zoned_write(ns, req); 2030 } 2031 2032 nvme_enqueue_req_completion(nvme_cq(req), req); 2033 } 2034 2035 static void 
nvme_rw_cb(void *opaque, int ret) 2036 { 2037 NvmeRequest *req = opaque; 2038 NvmeNamespace *ns = req->ns; 2039 2040 BlockBackend *blk = ns->blkconf.blk; 2041 2042 trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk)); 2043 2044 if (ret) { 2045 goto out; 2046 } 2047 2048 if (ns->lbaf.ms) { 2049 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2050 uint64_t slba = le64_to_cpu(rw->slba); 2051 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; 2052 uint64_t offset = nvme_moff(ns, slba); 2053 2054 if (req->cmd.opcode == NVME_CMD_WRITE_ZEROES) { 2055 size_t mlen = nvme_m2b(ns, nlb); 2056 2057 req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen, 2058 BDRV_REQ_MAY_UNMAP, 2059 nvme_rw_complete_cb, req); 2060 return; 2061 } 2062 2063 if (nvme_ns_ext(ns) || req->cmd.mptr) { 2064 uint16_t status; 2065 2066 nvme_sg_unmap(&req->sg); 2067 status = nvme_map_mdata(nvme_ctrl(req), nlb, req); 2068 if (status) { 2069 ret = -EFAULT; 2070 goto out; 2071 } 2072 2073 if (req->cmd.opcode == NVME_CMD_READ) { 2074 return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req); 2075 } 2076 2077 return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req); 2078 } 2079 } 2080 2081 out: 2082 nvme_rw_complete_cb(req, ret); 2083 } 2084 2085 static void nvme_verify_cb(void *opaque, int ret) 2086 { 2087 NvmeBounceContext *ctx = opaque; 2088 NvmeRequest *req = ctx->req; 2089 NvmeNamespace *ns = req->ns; 2090 BlockBackend *blk = ns->blkconf.blk; 2091 BlockAcctCookie *acct = &req->acct; 2092 BlockAcctStats *stats = blk_get_stats(blk); 2093 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2094 uint64_t slba = le64_to_cpu(rw->slba); 2095 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 2096 uint16_t apptag = le16_to_cpu(rw->apptag); 2097 uint16_t appmask = le16_to_cpu(rw->appmask); 2098 uint64_t reftag = le32_to_cpu(rw->reftag); 2099 uint64_t cdw3 = le32_to_cpu(rw->cdw3); 2100 uint16_t status; 2101 2102 reftag |= cdw3 << 32; 2103 2104 trace_pci_nvme_verify_cb(nvme_cid(req), prinfo, apptag, appmask, reftag); 2105 2106 if (ret) { 2107 block_acct_failed(stats, acct); 2108 nvme_aio_err(req, ret); 2109 goto out; 2110 } 2111 2112 block_acct_done(stats, acct); 2113 2114 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 2115 status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, 2116 ctx->mdata.iov.size, slba); 2117 if (status) { 2118 req->status = status; 2119 goto out; 2120 } 2121 2122 req->status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, 2123 ctx->mdata.bounce, ctx->mdata.iov.size, 2124 prinfo, slba, apptag, appmask, &reftag); 2125 } 2126 2127 out: 2128 qemu_iovec_destroy(&ctx->data.iov); 2129 g_free(ctx->data.bounce); 2130 2131 qemu_iovec_destroy(&ctx->mdata.iov); 2132 g_free(ctx->mdata.bounce); 2133 2134 g_free(ctx); 2135 2136 nvme_enqueue_req_completion(nvme_cq(req), req); 2137 } 2138 2139 2140 static void nvme_verify_mdata_in_cb(void *opaque, int ret) 2141 { 2142 NvmeBounceContext *ctx = opaque; 2143 NvmeRequest *req = ctx->req; 2144 NvmeNamespace *ns = req->ns; 2145 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2146 uint64_t slba = le64_to_cpu(rw->slba); 2147 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; 2148 size_t mlen = nvme_m2b(ns, nlb); 2149 uint64_t offset = nvme_moff(ns, slba); 2150 BlockBackend *blk = ns->blkconf.blk; 2151 2152 trace_pci_nvme_verify_mdata_in_cb(nvme_cid(req), blk_name(blk)); 2153 2154 if (ret) { 2155 goto out; 2156 } 2157 2158 ctx->mdata.bounce = g_malloc(mlen); 2159 2160 qemu_iovec_reset(&ctx->mdata.iov); 2161 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); 2162 2163 req->aiocb = blk_aio_preadv(blk, offset, 
&ctx->mdata.iov, 0, 2164 nvme_verify_cb, ctx); 2165 return; 2166 2167 out: 2168 nvme_verify_cb(ctx, ret); 2169 } 2170 2171 struct nvme_compare_ctx { 2172 struct { 2173 QEMUIOVector iov; 2174 uint8_t *bounce; 2175 } data; 2176 2177 struct { 2178 QEMUIOVector iov; 2179 uint8_t *bounce; 2180 } mdata; 2181 }; 2182 2183 static void nvme_compare_mdata_cb(void *opaque, int ret) 2184 { 2185 NvmeRequest *req = opaque; 2186 NvmeNamespace *ns = req->ns; 2187 NvmeCtrl *n = nvme_ctrl(req); 2188 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2189 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 2190 uint16_t apptag = le16_to_cpu(rw->apptag); 2191 uint16_t appmask = le16_to_cpu(rw->appmask); 2192 uint64_t reftag = le32_to_cpu(rw->reftag); 2193 uint64_t cdw3 = le32_to_cpu(rw->cdw3); 2194 struct nvme_compare_ctx *ctx = req->opaque; 2195 g_autofree uint8_t *buf = NULL; 2196 BlockBackend *blk = ns->blkconf.blk; 2197 BlockAcctCookie *acct = &req->acct; 2198 BlockAcctStats *stats = blk_get_stats(blk); 2199 uint16_t status = NVME_SUCCESS; 2200 2201 reftag |= cdw3 << 32; 2202 2203 trace_pci_nvme_compare_mdata_cb(nvme_cid(req)); 2204 2205 if (ret) { 2206 block_acct_failed(stats, acct); 2207 nvme_aio_err(req, ret); 2208 goto out; 2209 } 2210 2211 buf = g_malloc(ctx->mdata.iov.size); 2212 2213 status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size, 2214 NVME_TX_DIRECTION_TO_DEVICE, req); 2215 if (status) { 2216 req->status = status; 2217 goto out; 2218 } 2219 2220 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 2221 uint64_t slba = le64_to_cpu(rw->slba); 2222 uint8_t *bufp; 2223 uint8_t *mbufp = ctx->mdata.bounce; 2224 uint8_t *end = mbufp + ctx->mdata.iov.size; 2225 int16_t pil = 0; 2226 2227 status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, 2228 ctx->mdata.bounce, ctx->mdata.iov.size, prinfo, 2229 slba, apptag, appmask, &reftag); 2230 if (status) { 2231 req->status = status; 2232 goto out; 2233 } 2234 2235 /* 2236 * When formatted with protection information, do not compare the DIF 2237 * tuple. 
2238 */ 2239 if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { 2240 pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); 2241 } 2242 2243 for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { 2244 if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) { 2245 req->status = NVME_CMP_FAILURE; 2246 goto out; 2247 } 2248 } 2249 2250 goto out; 2251 } 2252 2253 if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { 2254 req->status = NVME_CMP_FAILURE; 2255 goto out; 2256 } 2257 2258 block_acct_done(stats, acct); 2259 2260 out: 2261 qemu_iovec_destroy(&ctx->data.iov); 2262 g_free(ctx->data.bounce); 2263 2264 qemu_iovec_destroy(&ctx->mdata.iov); 2265 g_free(ctx->mdata.bounce); 2266 2267 g_free(ctx); 2268 2269 nvme_enqueue_req_completion(nvme_cq(req), req); 2270 } 2271 2272 static void nvme_compare_data_cb(void *opaque, int ret) 2273 { 2274 NvmeRequest *req = opaque; 2275 NvmeCtrl *n = nvme_ctrl(req); 2276 NvmeNamespace *ns = req->ns; 2277 BlockBackend *blk = ns->blkconf.blk; 2278 BlockAcctCookie *acct = &req->acct; 2279 BlockAcctStats *stats = blk_get_stats(blk); 2280 2281 struct nvme_compare_ctx *ctx = req->opaque; 2282 g_autofree uint8_t *buf = NULL; 2283 uint16_t status; 2284 2285 trace_pci_nvme_compare_data_cb(nvme_cid(req)); 2286 2287 if (ret) { 2288 block_acct_failed(stats, acct); 2289 nvme_aio_err(req, ret); 2290 goto out; 2291 } 2292 2293 buf = g_malloc(ctx->data.iov.size); 2294 2295 status = nvme_bounce_data(n, buf, ctx->data.iov.size, 2296 NVME_TX_DIRECTION_TO_DEVICE, req); 2297 if (status) { 2298 req->status = status; 2299 goto out; 2300 } 2301 2302 if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) { 2303 req->status = NVME_CMP_FAILURE; 2304 goto out; 2305 } 2306 2307 if (ns->lbaf.ms) { 2308 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2309 uint64_t slba = le64_to_cpu(rw->slba); 2310 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; 2311 size_t mlen = nvme_m2b(ns, nlb); 2312 uint64_t offset = nvme_moff(ns, slba); 2313 2314 ctx->mdata.bounce = g_malloc(mlen); 2315 2316 qemu_iovec_init(&ctx->mdata.iov, 1); 2317 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); 2318 2319 req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, 2320 nvme_compare_mdata_cb, req); 2321 return; 2322 } 2323 2324 block_acct_done(stats, acct); 2325 2326 out: 2327 qemu_iovec_destroy(&ctx->data.iov); 2328 g_free(ctx->data.bounce); 2329 g_free(ctx); 2330 2331 nvme_enqueue_req_completion(nvme_cq(req), req); 2332 } 2333 2334 typedef struct NvmeDSMAIOCB { 2335 BlockAIOCB common; 2336 BlockAIOCB *aiocb; 2337 NvmeRequest *req; 2338 QEMUBH *bh; 2339 int ret; 2340 2341 NvmeDsmRange *range; 2342 unsigned int nr; 2343 unsigned int idx; 2344 } NvmeDSMAIOCB; 2345 2346 static void nvme_dsm_cancel(BlockAIOCB *aiocb) 2347 { 2348 NvmeDSMAIOCB *iocb = container_of(aiocb, NvmeDSMAIOCB, common); 2349 2350 /* break nvme_dsm_cb loop */ 2351 iocb->idx = iocb->nr; 2352 iocb->ret = -ECANCELED; 2353 2354 if (iocb->aiocb) { 2355 blk_aio_cancel_async(iocb->aiocb); 2356 iocb->aiocb = NULL; 2357 } else { 2358 /* 2359 * We only reach this if nvme_dsm_cancel() has already been called or 2360 * the command ran to completion and nvme_dsm_bh is scheduled to run. 
2361 */ 2362 assert(iocb->idx == iocb->nr); 2363 } 2364 } 2365 2366 static const AIOCBInfo nvme_dsm_aiocb_info = { 2367 .aiocb_size = sizeof(NvmeDSMAIOCB), 2368 .cancel_async = nvme_dsm_cancel, 2369 }; 2370 2371 static void nvme_dsm_bh(void *opaque) 2372 { 2373 NvmeDSMAIOCB *iocb = opaque; 2374 2375 iocb->common.cb(iocb->common.opaque, iocb->ret); 2376 2377 qemu_bh_delete(iocb->bh); 2378 iocb->bh = NULL; 2379 qemu_aio_unref(iocb); 2380 } 2381 2382 static void nvme_dsm_cb(void *opaque, int ret); 2383 2384 static void nvme_dsm_md_cb(void *opaque, int ret) 2385 { 2386 NvmeDSMAIOCB *iocb = opaque; 2387 NvmeRequest *req = iocb->req; 2388 NvmeNamespace *ns = req->ns; 2389 NvmeDsmRange *range; 2390 uint64_t slba; 2391 uint32_t nlb; 2392 2393 if (ret < 0) { 2394 iocb->ret = ret; 2395 goto done; 2396 } 2397 2398 if (!ns->lbaf.ms) { 2399 nvme_dsm_cb(iocb, 0); 2400 return; 2401 } 2402 2403 range = &iocb->range[iocb->idx - 1]; 2404 slba = le64_to_cpu(range->slba); 2405 nlb = le32_to_cpu(range->nlb); 2406 2407 /* 2408 * Check that all block were discarded (zeroed); otherwise we do not zero 2409 * the metadata. 2410 */ 2411 2412 ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_ZERO); 2413 if (ret) { 2414 if (ret < 0) { 2415 iocb->ret = ret; 2416 goto done; 2417 } 2418 2419 nvme_dsm_cb(iocb, 0); 2420 return; 2421 } 2422 2423 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_moff(ns, slba), 2424 nvme_m2b(ns, nlb), BDRV_REQ_MAY_UNMAP, 2425 nvme_dsm_cb, iocb); 2426 return; 2427 2428 done: 2429 iocb->aiocb = NULL; 2430 qemu_bh_schedule(iocb->bh); 2431 } 2432 2433 static void nvme_dsm_cb(void *opaque, int ret) 2434 { 2435 NvmeDSMAIOCB *iocb = opaque; 2436 NvmeRequest *req = iocb->req; 2437 NvmeCtrl *n = nvme_ctrl(req); 2438 NvmeNamespace *ns = req->ns; 2439 NvmeDsmRange *range; 2440 uint64_t slba; 2441 uint32_t nlb; 2442 2443 if (ret < 0) { 2444 iocb->ret = ret; 2445 goto done; 2446 } 2447 2448 next: 2449 if (iocb->idx == iocb->nr) { 2450 goto done; 2451 } 2452 2453 range = &iocb->range[iocb->idx++]; 2454 slba = le64_to_cpu(range->slba); 2455 nlb = le32_to_cpu(range->nlb); 2456 2457 trace_pci_nvme_dsm_deallocate(slba, nlb); 2458 2459 if (nlb > n->dmrsl) { 2460 trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl); 2461 goto next; 2462 } 2463 2464 if (nvme_check_bounds(ns, slba, nlb)) { 2465 trace_pci_nvme_err_invalid_lba_range(slba, nlb, 2466 ns->id_ns.nsze); 2467 goto next; 2468 } 2469 2470 iocb->aiocb = blk_aio_pdiscard(ns->blkconf.blk, nvme_l2b(ns, slba), 2471 nvme_l2b(ns, nlb), 2472 nvme_dsm_md_cb, iocb); 2473 return; 2474 2475 done: 2476 iocb->aiocb = NULL; 2477 qemu_bh_schedule(iocb->bh); 2478 } 2479 2480 static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) 2481 { 2482 NvmeNamespace *ns = req->ns; 2483 NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd; 2484 uint32_t attr = le32_to_cpu(dsm->attributes); 2485 uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1; 2486 uint16_t status = NVME_SUCCESS; 2487 2488 trace_pci_nvme_dsm(nr, attr); 2489 2490 if (attr & NVME_DSMGMT_AD) { 2491 NvmeDSMAIOCB *iocb = blk_aio_get(&nvme_dsm_aiocb_info, ns->blkconf.blk, 2492 nvme_misc_cb, req); 2493 2494 iocb->req = req; 2495 iocb->bh = qemu_bh_new(nvme_dsm_bh, iocb); 2496 iocb->ret = 0; 2497 iocb->range = g_new(NvmeDsmRange, nr); 2498 iocb->nr = nr; 2499 iocb->idx = 0; 2500 2501 status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr, 2502 req); 2503 if (status) { 2504 return status; 2505 } 2506 2507 req->aiocb = &iocb->common; 2508 nvme_dsm_cb(iocb, 0); 2509 2510 return NVME_NO_COMPLETE; 2511 } 
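    /*
     * No deallocate (AD) requested: the remaining DSM attributes (the
     * IDR/IDW context hints) are advisory only, so nothing needs to be done
     * and the command completes immediately with NVME_SUCCESS.
     *
     * For the AD path above, a rough worked example (illustrative numbers,
     * not taken from the command itself): each NvmeDsmRange entry is 16
     * bytes, so with the maximum of 256 ranges the nvme_h2c() transfer is at
     * most 4 KiB. A single range of {slba = 0, nlb = 256} on a 4 KiB LBA
     * format discards bytes [0, 1 MiB) of the data area and, if the discard
     * actually zeroed those blocks, also zeroes the matching metadata region
     * via nvme_dsm_md_cb().
     */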
2512 2513 return status; 2514 } 2515 2516 static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req) 2517 { 2518 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2519 NvmeNamespace *ns = req->ns; 2520 BlockBackend *blk = ns->blkconf.blk; 2521 uint64_t slba = le64_to_cpu(rw->slba); 2522 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; 2523 size_t len = nvme_l2b(ns, nlb); 2524 int64_t offset = nvme_l2b(ns, slba); 2525 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 2526 uint32_t reftag = le32_to_cpu(rw->reftag); 2527 NvmeBounceContext *ctx = NULL; 2528 uint16_t status; 2529 2530 trace_pci_nvme_verify(nvme_cid(req), nvme_nsid(ns), slba, nlb); 2531 2532 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 2533 status = nvme_check_prinfo(ns, prinfo, slba, reftag); 2534 if (status) { 2535 return status; 2536 } 2537 2538 if (prinfo & NVME_PRINFO_PRACT) { 2539 return NVME_INVALID_PROT_INFO | NVME_DNR; 2540 } 2541 } 2542 2543 if (len > n->page_size << n->params.vsl) { 2544 return NVME_INVALID_FIELD | NVME_DNR; 2545 } 2546 2547 status = nvme_check_bounds(ns, slba, nlb); 2548 if (status) { 2549 return status; 2550 } 2551 2552 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { 2553 status = nvme_check_dulbe(ns, slba, nlb); 2554 if (status) { 2555 return status; 2556 } 2557 } 2558 2559 ctx = g_new0(NvmeBounceContext, 1); 2560 ctx->req = req; 2561 2562 ctx->data.bounce = g_malloc(len); 2563 2564 qemu_iovec_init(&ctx->data.iov, 1); 2565 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len); 2566 2567 block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, 2568 BLOCK_ACCT_READ); 2569 2570 req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0, 2571 nvme_verify_mdata_in_cb, ctx); 2572 return NVME_NO_COMPLETE; 2573 } 2574 2575 typedef struct NvmeCopyAIOCB { 2576 BlockAIOCB common; 2577 BlockAIOCB *aiocb; 2578 NvmeRequest *req; 2579 QEMUBH *bh; 2580 int ret; 2581 2582 void *ranges; 2583 unsigned int format; 2584 int nr; 2585 int idx; 2586 2587 uint8_t *bounce; 2588 QEMUIOVector iov; 2589 struct { 2590 BlockAcctCookie read; 2591 BlockAcctCookie write; 2592 } acct; 2593 2594 uint64_t reftag; 2595 uint64_t slba; 2596 2597 NvmeZone *zone; 2598 } NvmeCopyAIOCB; 2599 2600 static void nvme_copy_cancel(BlockAIOCB *aiocb) 2601 { 2602 NvmeCopyAIOCB *iocb = container_of(aiocb, NvmeCopyAIOCB, common); 2603 2604 iocb->ret = -ECANCELED; 2605 2606 if (iocb->aiocb) { 2607 blk_aio_cancel_async(iocb->aiocb); 2608 iocb->aiocb = NULL; 2609 } 2610 } 2611 2612 static const AIOCBInfo nvme_copy_aiocb_info = { 2613 .aiocb_size = sizeof(NvmeCopyAIOCB), 2614 .cancel_async = nvme_copy_cancel, 2615 }; 2616 2617 static void nvme_copy_bh(void *opaque) 2618 { 2619 NvmeCopyAIOCB *iocb = opaque; 2620 NvmeRequest *req = iocb->req; 2621 NvmeNamespace *ns = req->ns; 2622 BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk); 2623 2624 if (iocb->idx != iocb->nr) { 2625 req->cqe.result = cpu_to_le32(iocb->idx); 2626 } 2627 2628 qemu_iovec_destroy(&iocb->iov); 2629 g_free(iocb->bounce); 2630 2631 qemu_bh_delete(iocb->bh); 2632 iocb->bh = NULL; 2633 2634 if (iocb->ret < 0) { 2635 block_acct_failed(stats, &iocb->acct.read); 2636 block_acct_failed(stats, &iocb->acct.write); 2637 } else { 2638 block_acct_done(stats, &iocb->acct.read); 2639 block_acct_done(stats, &iocb->acct.write); 2640 } 2641 2642 iocb->common.cb(iocb->common.opaque, iocb->ret); 2643 qemu_aio_unref(iocb); 2644 } 2645 2646 static void nvme_copy_cb(void *opaque, int ret); 2647 2648 static void nvme_copy_source_range_parse_format0(void *ranges, int idx, 2649 uint64_t *slba, 
uint32_t *nlb, 2650 uint16_t *apptag, 2651 uint16_t *appmask, 2652 uint64_t *reftag) 2653 { 2654 NvmeCopySourceRangeFormat0 *_ranges = ranges; 2655 2656 if (slba) { 2657 *slba = le64_to_cpu(_ranges[idx].slba); 2658 } 2659 2660 if (nlb) { 2661 *nlb = le16_to_cpu(_ranges[idx].nlb) + 1; 2662 } 2663 2664 if (apptag) { 2665 *apptag = le16_to_cpu(_ranges[idx].apptag); 2666 } 2667 2668 if (appmask) { 2669 *appmask = le16_to_cpu(_ranges[idx].appmask); 2670 } 2671 2672 if (reftag) { 2673 *reftag = le32_to_cpu(_ranges[idx].reftag); 2674 } 2675 } 2676 2677 static void nvme_copy_source_range_parse_format1(void *ranges, int idx, 2678 uint64_t *slba, uint32_t *nlb, 2679 uint16_t *apptag, 2680 uint16_t *appmask, 2681 uint64_t *reftag) 2682 { 2683 NvmeCopySourceRangeFormat1 *_ranges = ranges; 2684 2685 if (slba) { 2686 *slba = le64_to_cpu(_ranges[idx].slba); 2687 } 2688 2689 if (nlb) { 2690 *nlb = le16_to_cpu(_ranges[idx].nlb) + 1; 2691 } 2692 2693 if (apptag) { 2694 *apptag = le16_to_cpu(_ranges[idx].apptag); 2695 } 2696 2697 if (appmask) { 2698 *appmask = le16_to_cpu(_ranges[idx].appmask); 2699 } 2700 2701 if (reftag) { 2702 *reftag = 0; 2703 2704 *reftag |= (uint64_t)_ranges[idx].sr[4] << 40; 2705 *reftag |= (uint64_t)_ranges[idx].sr[5] << 32; 2706 *reftag |= (uint64_t)_ranges[idx].sr[6] << 24; 2707 *reftag |= (uint64_t)_ranges[idx].sr[7] << 16; 2708 *reftag |= (uint64_t)_ranges[idx].sr[8] << 8; 2709 *reftag |= (uint64_t)_ranges[idx].sr[9]; 2710 } 2711 } 2712 2713 static void nvme_copy_source_range_parse(void *ranges, int idx, uint8_t format, 2714 uint64_t *slba, uint32_t *nlb, 2715 uint16_t *apptag, uint16_t *appmask, 2716 uint64_t *reftag) 2717 { 2718 switch (format) { 2719 case NVME_COPY_FORMAT_0: 2720 nvme_copy_source_range_parse_format0(ranges, idx, slba, nlb, apptag, 2721 appmask, reftag); 2722 break; 2723 2724 case NVME_COPY_FORMAT_1: 2725 nvme_copy_source_range_parse_format1(ranges, idx, slba, nlb, apptag, 2726 appmask, reftag); 2727 break; 2728 2729 default: 2730 abort(); 2731 } 2732 } 2733 2734 static void nvme_copy_out_completed_cb(void *opaque, int ret) 2735 { 2736 NvmeCopyAIOCB *iocb = opaque; 2737 NvmeRequest *req = iocb->req; 2738 NvmeNamespace *ns = req->ns; 2739 uint32_t nlb; 2740 2741 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, 2742 &nlb, NULL, NULL, NULL); 2743 2744 if (ret < 0) { 2745 iocb->ret = ret; 2746 goto out; 2747 } else if (iocb->ret < 0) { 2748 goto out; 2749 } 2750 2751 if (ns->params.zoned) { 2752 nvme_advance_zone_wp(ns, iocb->zone, nlb); 2753 } 2754 2755 iocb->idx++; 2756 iocb->slba += nlb; 2757 out: 2758 nvme_copy_cb(iocb, iocb->ret); 2759 } 2760 2761 static void nvme_copy_out_cb(void *opaque, int ret) 2762 { 2763 NvmeCopyAIOCB *iocb = opaque; 2764 NvmeRequest *req = iocb->req; 2765 NvmeNamespace *ns = req->ns; 2766 uint32_t nlb; 2767 size_t mlen; 2768 uint8_t *mbounce; 2769 2770 if (ret < 0) { 2771 iocb->ret = ret; 2772 goto out; 2773 } else if (iocb->ret < 0) { 2774 goto out; 2775 } 2776 2777 if (!ns->lbaf.ms) { 2778 nvme_copy_out_completed_cb(iocb, 0); 2779 return; 2780 } 2781 2782 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, 2783 &nlb, NULL, NULL, NULL); 2784 2785 mlen = nvme_m2b(ns, nlb); 2786 mbounce = iocb->bounce + nvme_l2b(ns, nlb); 2787 2788 qemu_iovec_reset(&iocb->iov); 2789 qemu_iovec_add(&iocb->iov, mbounce, mlen); 2790 2791 iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_moff(ns, iocb->slba), 2792 &iocb->iov, 0, nvme_copy_out_completed_cb, 2793 iocb); 2794 2795 return; 2796 2797 out: 2798 
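    /*
     * Error path: hand the error (either ret or one previously recorded in
     * iocb->ret) back to the copy state machine; nvme_copy_cb() sees it and
     * schedules the completion bottom half instead of starting another
     * source range.
     */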
nvme_copy_cb(iocb, ret); 2799 } 2800 2801 static void nvme_copy_in_completed_cb(void *opaque, int ret) 2802 { 2803 NvmeCopyAIOCB *iocb = opaque; 2804 NvmeRequest *req = iocb->req; 2805 NvmeNamespace *ns = req->ns; 2806 uint32_t nlb; 2807 uint64_t slba; 2808 uint16_t apptag, appmask; 2809 uint64_t reftag; 2810 size_t len; 2811 uint16_t status; 2812 2813 if (ret < 0) { 2814 iocb->ret = ret; 2815 goto out; 2816 } else if (iocb->ret < 0) { 2817 goto out; 2818 } 2819 2820 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, 2821 &nlb, &apptag, &appmask, &reftag); 2822 len = nvme_l2b(ns, nlb); 2823 2824 trace_pci_nvme_copy_out(iocb->slba, nlb); 2825 2826 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 2827 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; 2828 2829 uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); 2830 uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); 2831 2832 size_t mlen = nvme_m2b(ns, nlb); 2833 uint8_t *mbounce = iocb->bounce + nvme_l2b(ns, nlb); 2834 2835 status = nvme_dif_mangle_mdata(ns, mbounce, mlen, slba); 2836 if (status) { 2837 goto invalid; 2838 } 2839 status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, prinfor, 2840 slba, apptag, appmask, &reftag); 2841 if (status) { 2842 goto invalid; 2843 } 2844 2845 apptag = le16_to_cpu(copy->apptag); 2846 appmask = le16_to_cpu(copy->appmask); 2847 2848 if (prinfow & NVME_PRINFO_PRACT) { 2849 status = nvme_check_prinfo(ns, prinfow, iocb->slba, iocb->reftag); 2850 if (status) { 2851 goto invalid; 2852 } 2853 2854 nvme_dif_pract_generate_dif(ns, iocb->bounce, len, mbounce, mlen, 2855 apptag, &iocb->reftag); 2856 } else { 2857 status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, 2858 prinfow, iocb->slba, apptag, appmask, 2859 &iocb->reftag); 2860 if (status) { 2861 goto invalid; 2862 } 2863 } 2864 } 2865 2866 status = nvme_check_bounds(ns, iocb->slba, nlb); 2867 if (status) { 2868 goto invalid; 2869 } 2870 2871 if (ns->params.zoned) { 2872 status = nvme_check_zone_write(ns, iocb->zone, iocb->slba, nlb); 2873 if (status) { 2874 goto invalid; 2875 } 2876 2877 if (!(iocb->zone->d.za & NVME_ZA_ZRWA_VALID)) { 2878 iocb->zone->w_ptr += nlb; 2879 } 2880 } 2881 2882 qemu_iovec_reset(&iocb->iov); 2883 qemu_iovec_add(&iocb->iov, iocb->bounce, len); 2884 2885 iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, iocb->slba), 2886 &iocb->iov, 0, nvme_copy_out_cb, iocb); 2887 2888 return; 2889 2890 invalid: 2891 req->status = status; 2892 iocb->aiocb = NULL; 2893 if (iocb->bh) { 2894 qemu_bh_schedule(iocb->bh); 2895 } 2896 2897 return; 2898 2899 out: 2900 nvme_copy_cb(iocb, ret); 2901 } 2902 2903 static void nvme_copy_in_cb(void *opaque, int ret) 2904 { 2905 NvmeCopyAIOCB *iocb = opaque; 2906 NvmeRequest *req = iocb->req; 2907 NvmeNamespace *ns = req->ns; 2908 uint64_t slba; 2909 uint32_t nlb; 2910 2911 if (ret < 0) { 2912 iocb->ret = ret; 2913 goto out; 2914 } else if (iocb->ret < 0) { 2915 goto out; 2916 } 2917 2918 if (!ns->lbaf.ms) { 2919 nvme_copy_in_completed_cb(iocb, 0); 2920 return; 2921 } 2922 2923 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, 2924 &nlb, NULL, NULL, NULL); 2925 2926 qemu_iovec_reset(&iocb->iov); 2927 qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(ns, nlb), 2928 nvme_m2b(ns, nlb)); 2929 2930 iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_moff(ns, slba), 2931 &iocb->iov, 0, nvme_copy_in_completed_cb, 2932 iocb); 2933 return; 2934 2935 out: 2936 nvme_copy_cb(iocb, iocb->ret); 2937 } 2938 2939 static void nvme_copy_cb(void *opaque, int ret) 2940 
{ 2941 NvmeCopyAIOCB *iocb = opaque; 2942 NvmeRequest *req = iocb->req; 2943 NvmeNamespace *ns = req->ns; 2944 uint64_t slba; 2945 uint32_t nlb; 2946 size_t len; 2947 uint16_t status; 2948 2949 if (ret < 0) { 2950 iocb->ret = ret; 2951 goto done; 2952 } else if (iocb->ret < 0) { 2953 goto done; 2954 } 2955 2956 if (iocb->idx == iocb->nr) { 2957 goto done; 2958 } 2959 2960 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, 2961 &nlb, NULL, NULL, NULL); 2962 len = nvme_l2b(ns, nlb); 2963 2964 trace_pci_nvme_copy_source_range(slba, nlb); 2965 2966 if (nlb > le16_to_cpu(ns->id_ns.mssrl)) { 2967 status = NVME_CMD_SIZE_LIMIT | NVME_DNR; 2968 goto invalid; 2969 } 2970 2971 status = nvme_check_bounds(ns, slba, nlb); 2972 if (status) { 2973 goto invalid; 2974 } 2975 2976 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { 2977 status = nvme_check_dulbe(ns, slba, nlb); 2978 if (status) { 2979 goto invalid; 2980 } 2981 } 2982 2983 if (ns->params.zoned) { 2984 status = nvme_check_zone_read(ns, slba, nlb); 2985 if (status) { 2986 goto invalid; 2987 } 2988 } 2989 2990 qemu_iovec_reset(&iocb->iov); 2991 qemu_iovec_add(&iocb->iov, iocb->bounce, len); 2992 2993 iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_l2b(ns, slba), 2994 &iocb->iov, 0, nvme_copy_in_cb, iocb); 2995 return; 2996 2997 invalid: 2998 req->status = status; 2999 done: 3000 iocb->aiocb = NULL; 3001 if (iocb->bh) { 3002 qemu_bh_schedule(iocb->bh); 3003 } 3004 } 3005 3006 3007 static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req) 3008 { 3009 NvmeNamespace *ns = req->ns; 3010 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; 3011 NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk, 3012 nvme_misc_cb, req); 3013 uint16_t nr = copy->nr + 1; 3014 uint8_t format = copy->control[0] & 0xf; 3015 uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); 3016 uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); 3017 size_t len = sizeof(NvmeCopySourceRangeFormat0); 3018 3019 uint16_t status; 3020 3021 trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format); 3022 3023 iocb->ranges = NULL; 3024 iocb->zone = NULL; 3025 3026 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && 3027 ((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) { 3028 status = NVME_INVALID_FIELD | NVME_DNR; 3029 goto invalid; 3030 } 3031 3032 if (!(n->id_ctrl.ocfs & (1 << format))) { 3033 trace_pci_nvme_err_copy_invalid_format(format); 3034 status = NVME_INVALID_FIELD | NVME_DNR; 3035 goto invalid; 3036 } 3037 3038 if (nr > ns->id_ns.msrc + 1) { 3039 status = NVME_CMD_SIZE_LIMIT | NVME_DNR; 3040 goto invalid; 3041 } 3042 3043 if (ns->pif && format != 0x1) { 3044 status = NVME_INVALID_FORMAT | NVME_DNR; 3045 goto invalid; 3046 } 3047 3048 if (ns->pif) { 3049 len = sizeof(NvmeCopySourceRangeFormat1); 3050 } 3051 3052 iocb->format = format; 3053 iocb->ranges = g_malloc_n(nr, len); 3054 status = nvme_h2c(n, (uint8_t *)iocb->ranges, len * nr, req); 3055 if (status) { 3056 goto invalid; 3057 } 3058 3059 iocb->slba = le64_to_cpu(copy->sdlba); 3060 3061 if (ns->params.zoned) { 3062 iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba); 3063 if (!iocb->zone) { 3064 status = NVME_LBA_RANGE | NVME_DNR; 3065 goto invalid; 3066 } 3067 3068 status = nvme_zrm_auto(n, ns, iocb->zone); 3069 if (status) { 3070 goto invalid; 3071 } 3072 } 3073 3074 iocb->req = req; 3075 iocb->bh = qemu_bh_new(nvme_copy_bh, iocb); 3076 iocb->ret = 0; 3077 iocb->nr = nr; 3078 iocb->idx = 0; 3079 iocb->reftag = le32_to_cpu(copy->reftag); 3080 iocb->reftag |= 
(uint64_t)le32_to_cpu(copy->cdw3) << 32; 3081 iocb->bounce = g_malloc_n(le16_to_cpu(ns->id_ns.mssrl), 3082 ns->lbasz + ns->lbaf.ms); 3083 3084 qemu_iovec_init(&iocb->iov, 1); 3085 3086 block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.read, 0, 3087 BLOCK_ACCT_READ); 3088 block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.write, 0, 3089 BLOCK_ACCT_WRITE); 3090 3091 req->aiocb = &iocb->common; 3092 nvme_copy_cb(iocb, 0); 3093 3094 return NVME_NO_COMPLETE; 3095 3096 invalid: 3097 g_free(iocb->ranges); 3098 qemu_aio_unref(iocb); 3099 return status; 3100 } 3101 3102 static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req) 3103 { 3104 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 3105 NvmeNamespace *ns = req->ns; 3106 BlockBackend *blk = ns->blkconf.blk; 3107 uint64_t slba = le64_to_cpu(rw->slba); 3108 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; 3109 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 3110 size_t data_len = nvme_l2b(ns, nlb); 3111 size_t len = data_len; 3112 int64_t offset = nvme_l2b(ns, slba); 3113 struct nvme_compare_ctx *ctx = NULL; 3114 uint16_t status; 3115 3116 trace_pci_nvme_compare(nvme_cid(req), nvme_nsid(ns), slba, nlb); 3117 3118 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (prinfo & NVME_PRINFO_PRACT)) { 3119 return NVME_INVALID_PROT_INFO | NVME_DNR; 3120 } 3121 3122 if (nvme_ns_ext(ns)) { 3123 len += nvme_m2b(ns, nlb); 3124 } 3125 3126 status = nvme_check_mdts(n, len); 3127 if (status) { 3128 return status; 3129 } 3130 3131 status = nvme_check_bounds(ns, slba, nlb); 3132 if (status) { 3133 return status; 3134 } 3135 3136 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { 3137 status = nvme_check_dulbe(ns, slba, nlb); 3138 if (status) { 3139 return status; 3140 } 3141 } 3142 3143 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); 3144 if (status) { 3145 return status; 3146 } 3147 3148 ctx = g_new(struct nvme_compare_ctx, 1); 3149 ctx->data.bounce = g_malloc(data_len); 3150 3151 req->opaque = ctx; 3152 3153 qemu_iovec_init(&ctx->data.iov, 1); 3154 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len); 3155 3156 block_acct_start(blk_get_stats(blk), &req->acct, data_len, 3157 BLOCK_ACCT_READ); 3158 req->aiocb = blk_aio_preadv(blk, offset, &ctx->data.iov, 0, 3159 nvme_compare_data_cb, req); 3160 3161 return NVME_NO_COMPLETE; 3162 } 3163 3164 typedef struct NvmeFlushAIOCB { 3165 BlockAIOCB common; 3166 BlockAIOCB *aiocb; 3167 NvmeRequest *req; 3168 QEMUBH *bh; 3169 int ret; 3170 3171 NvmeNamespace *ns; 3172 uint32_t nsid; 3173 bool broadcast; 3174 } NvmeFlushAIOCB; 3175 3176 static void nvme_flush_cancel(BlockAIOCB *acb) 3177 { 3178 NvmeFlushAIOCB *iocb = container_of(acb, NvmeFlushAIOCB, common); 3179 3180 iocb->ret = -ECANCELED; 3181 3182 if (iocb->aiocb) { 3183 blk_aio_cancel_async(iocb->aiocb); 3184 } 3185 } 3186 3187 static const AIOCBInfo nvme_flush_aiocb_info = { 3188 .aiocb_size = sizeof(NvmeFlushAIOCB), 3189 .cancel_async = nvme_flush_cancel, 3190 .get_aio_context = nvme_get_aio_context, 3191 }; 3192 3193 static void nvme_flush_ns_cb(void *opaque, int ret) 3194 { 3195 NvmeFlushAIOCB *iocb = opaque; 3196 NvmeNamespace *ns = iocb->ns; 3197 3198 if (ret < 0) { 3199 iocb->ret = ret; 3200 goto out; 3201 } else if (iocb->ret < 0) { 3202 goto out; 3203 } 3204 3205 if (ns) { 3206 trace_pci_nvme_flush_ns(iocb->nsid); 3207 3208 iocb->ns = NULL; 3209 iocb->aiocb = blk_aio_flush(ns->blkconf.blk, nvme_flush_ns_cb, iocb); 3210 return; 3211 } 3212 3213 out: 3214 iocb->aiocb = NULL; 3215 qemu_bh_schedule(iocb->bh); 3216 } 3217 3218 static void 
nvme_flush_bh(void *opaque) 3219 { 3220 NvmeFlushAIOCB *iocb = opaque; 3221 NvmeRequest *req = iocb->req; 3222 NvmeCtrl *n = nvme_ctrl(req); 3223 int i; 3224 3225 if (iocb->ret < 0) { 3226 goto done; 3227 } 3228 3229 if (iocb->broadcast) { 3230 for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { 3231 iocb->ns = nvme_ns(n, i); 3232 if (iocb->ns) { 3233 iocb->nsid = i; 3234 break; 3235 } 3236 } 3237 } 3238 3239 if (!iocb->ns) { 3240 goto done; 3241 } 3242 3243 nvme_flush_ns_cb(iocb, 0); 3244 return; 3245 3246 done: 3247 qemu_bh_delete(iocb->bh); 3248 iocb->bh = NULL; 3249 3250 iocb->common.cb(iocb->common.opaque, iocb->ret); 3251 3252 qemu_aio_unref(iocb); 3253 3254 return; 3255 } 3256 3257 static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req) 3258 { 3259 NvmeFlushAIOCB *iocb; 3260 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 3261 uint16_t status; 3262 3263 iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req); 3264 3265 iocb->req = req; 3266 iocb->bh = qemu_bh_new(nvme_flush_bh, iocb); 3267 iocb->ret = 0; 3268 iocb->ns = NULL; 3269 iocb->nsid = 0; 3270 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); 3271 3272 if (!iocb->broadcast) { 3273 if (!nvme_nsid_valid(n, nsid)) { 3274 status = NVME_INVALID_NSID | NVME_DNR; 3275 goto out; 3276 } 3277 3278 iocb->ns = nvme_ns(n, nsid); 3279 if (!iocb->ns) { 3280 status = NVME_INVALID_FIELD | NVME_DNR; 3281 goto out; 3282 } 3283 3284 iocb->nsid = nsid; 3285 } 3286 3287 req->aiocb = &iocb->common; 3288 qemu_bh_schedule(iocb->bh); 3289 3290 return NVME_NO_COMPLETE; 3291 3292 out: 3293 qemu_bh_delete(iocb->bh); 3294 iocb->bh = NULL; 3295 qemu_aio_unref(iocb); 3296 3297 return status; 3298 } 3299 3300 static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) 3301 { 3302 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 3303 NvmeNamespace *ns = req->ns; 3304 uint64_t slba = le64_to_cpu(rw->slba); 3305 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; 3306 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 3307 uint64_t data_size = nvme_l2b(ns, nlb); 3308 uint64_t mapped_size = data_size; 3309 uint64_t data_offset; 3310 BlockBackend *blk = ns->blkconf.blk; 3311 uint16_t status; 3312 3313 if (nvme_ns_ext(ns)) { 3314 mapped_size += nvme_m2b(ns, nlb); 3315 3316 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3317 bool pract = prinfo & NVME_PRINFO_PRACT; 3318 3319 if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { 3320 mapped_size = data_size; 3321 } 3322 } 3323 } 3324 3325 trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, mapped_size, slba); 3326 3327 status = nvme_check_mdts(n, mapped_size); 3328 if (status) { 3329 goto invalid; 3330 } 3331 3332 status = nvme_check_bounds(ns, slba, nlb); 3333 if (status) { 3334 goto invalid; 3335 } 3336 3337 if (ns->params.zoned) { 3338 status = nvme_check_zone_read(ns, slba, nlb); 3339 if (status) { 3340 trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status); 3341 goto invalid; 3342 } 3343 } 3344 3345 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { 3346 status = nvme_check_dulbe(ns, slba, nlb); 3347 if (status) { 3348 goto invalid; 3349 } 3350 } 3351 3352 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3353 return nvme_dif_rw(n, req); 3354 } 3355 3356 status = nvme_map_data(n, nlb, req); 3357 if (status) { 3358 goto invalid; 3359 } 3360 3361 data_offset = nvme_l2b(ns, slba); 3362 3363 block_acct_start(blk_get_stats(blk), &req->acct, data_size, 3364 BLOCK_ACCT_READ); 3365 nvme_blk_read(blk, data_offset, nvme_rw_cb, req); 3366 return NVME_NO_COMPLETE; 3367 3368 invalid: 3369 
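    /*
     * The rejected read never reaches the block layer; account it as an
     * invalid request and return the status with DNR (Do Not Retry) set.
     */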
block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ); 3370 return status | NVME_DNR; 3371 } 3372 3373 static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, 3374 bool wrz) 3375 { 3376 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 3377 NvmeNamespace *ns = req->ns; 3378 uint64_t slba = le64_to_cpu(rw->slba); 3379 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; 3380 uint16_t ctrl = le16_to_cpu(rw->control); 3381 uint8_t prinfo = NVME_RW_PRINFO(ctrl); 3382 uint64_t data_size = nvme_l2b(ns, nlb); 3383 uint64_t mapped_size = data_size; 3384 uint64_t data_offset; 3385 NvmeZone *zone; 3386 NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe; 3387 BlockBackend *blk = ns->blkconf.blk; 3388 uint16_t status; 3389 3390 if (nvme_ns_ext(ns)) { 3391 mapped_size += nvme_m2b(ns, nlb); 3392 3393 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3394 bool pract = prinfo & NVME_PRINFO_PRACT; 3395 3396 if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { 3397 mapped_size -= nvme_m2b(ns, nlb); 3398 } 3399 } 3400 } 3401 3402 trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode), 3403 nvme_nsid(ns), nlb, mapped_size, slba); 3404 3405 if (!wrz) { 3406 status = nvme_check_mdts(n, mapped_size); 3407 if (status) { 3408 goto invalid; 3409 } 3410 } 3411 3412 status = nvme_check_bounds(ns, slba, nlb); 3413 if (status) { 3414 goto invalid; 3415 } 3416 3417 if (ns->params.zoned) { 3418 zone = nvme_get_zone_by_slba(ns, slba); 3419 assert(zone); 3420 3421 if (append) { 3422 bool piremap = !!(ctrl & NVME_RW_PIREMAP); 3423 3424 if (unlikely(zone->d.za & NVME_ZA_ZRWA_VALID)) { 3425 return NVME_INVALID_ZONE_OP | NVME_DNR; 3426 } 3427 3428 if (unlikely(slba != zone->d.zslba)) { 3429 trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba); 3430 status = NVME_INVALID_FIELD; 3431 goto invalid; 3432 } 3433 3434 if (n->params.zasl && 3435 data_size > (uint64_t)n->page_size << n->params.zasl) { 3436 trace_pci_nvme_err_zasl(data_size); 3437 return NVME_INVALID_FIELD | NVME_DNR; 3438 } 3439 3440 slba = zone->w_ptr; 3441 rw->slba = cpu_to_le64(slba); 3442 res->slba = cpu_to_le64(slba); 3443 3444 switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3445 case NVME_ID_NS_DPS_TYPE_1: 3446 if (!piremap) { 3447 return NVME_INVALID_PROT_INFO | NVME_DNR; 3448 } 3449 3450 /* fallthrough */ 3451 3452 case NVME_ID_NS_DPS_TYPE_2: 3453 if (piremap) { 3454 uint32_t reftag = le32_to_cpu(rw->reftag); 3455 rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba)); 3456 } 3457 3458 break; 3459 3460 case NVME_ID_NS_DPS_TYPE_3: 3461 if (piremap) { 3462 return NVME_INVALID_PROT_INFO | NVME_DNR; 3463 } 3464 3465 break; 3466 } 3467 } 3468 3469 status = nvme_check_zone_write(ns, zone, slba, nlb); 3470 if (status) { 3471 goto invalid; 3472 } 3473 3474 status = nvme_zrm_auto(n, ns, zone); 3475 if (status) { 3476 goto invalid; 3477 } 3478 3479 if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { 3480 zone->w_ptr += nlb; 3481 } 3482 } 3483 3484 data_offset = nvme_l2b(ns, slba); 3485 3486 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3487 return nvme_dif_rw(n, req); 3488 } 3489 3490 if (!wrz) { 3491 status = nvme_map_data(n, nlb, req); 3492 if (status) { 3493 goto invalid; 3494 } 3495 3496 block_acct_start(blk_get_stats(blk), &req->acct, data_size, 3497 BLOCK_ACCT_WRITE); 3498 nvme_blk_write(blk, data_offset, nvme_rw_cb, req); 3499 } else { 3500 req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size, 3501 BDRV_REQ_MAY_UNMAP, nvme_rw_cb, 3502 req); 3503 } 3504 3505 return NVME_NO_COMPLETE; 3506 3507 invalid: 3508 block_acct_invalid(blk_get_stats(blk), 
BLOCK_ACCT_WRITE); 3509 return status | NVME_DNR; 3510 } 3511 3512 static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req) 3513 { 3514 return nvme_do_write(n, req, false, false); 3515 } 3516 3517 static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req) 3518 { 3519 return nvme_do_write(n, req, false, true); 3520 } 3521 3522 static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req) 3523 { 3524 return nvme_do_write(n, req, true, false); 3525 } 3526 3527 static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c, 3528 uint64_t *slba, uint32_t *zone_idx) 3529 { 3530 uint32_t dw10 = le32_to_cpu(c->cdw10); 3531 uint32_t dw11 = le32_to_cpu(c->cdw11); 3532 3533 if (!ns->params.zoned) { 3534 trace_pci_nvme_err_invalid_opc(c->opcode); 3535 return NVME_INVALID_OPCODE | NVME_DNR; 3536 } 3537 3538 *slba = ((uint64_t)dw11) << 32 | dw10; 3539 if (unlikely(*slba >= ns->id_ns.nsze)) { 3540 trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze); 3541 *slba = 0; 3542 return NVME_LBA_RANGE | NVME_DNR; 3543 } 3544 3545 *zone_idx = nvme_zone_idx(ns, *slba); 3546 assert(*zone_idx < ns->num_zones); 3547 3548 return NVME_SUCCESS; 3549 } 3550 3551 typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState, 3552 NvmeRequest *); 3553 3554 enum NvmeZoneProcessingMask { 3555 NVME_PROC_CURRENT_ZONE = 0, 3556 NVME_PROC_OPENED_ZONES = 1 << 0, 3557 NVME_PROC_CLOSED_ZONES = 1 << 1, 3558 NVME_PROC_READ_ONLY_ZONES = 1 << 2, 3559 NVME_PROC_FULL_ZONES = 1 << 3, 3560 }; 3561 3562 static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone, 3563 NvmeZoneState state, NvmeRequest *req) 3564 { 3565 NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; 3566 int flags = 0; 3567 3568 if (cmd->zsflags & NVME_ZSFLAG_ZRWA_ALLOC) { 3569 uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); 3570 3571 if (!(ozcs & NVME_ID_NS_ZONED_OZCS_ZRWASUP)) { 3572 return NVME_INVALID_ZONE_OP | NVME_DNR; 3573 } 3574 3575 if (zone->w_ptr % ns->zns.zrwafg) { 3576 return NVME_NOZRWA | NVME_DNR; 3577 } 3578 3579 flags = NVME_ZRM_ZRWA; 3580 } 3581 3582 return nvme_zrm_open_flags(nvme_ctrl(req), ns, zone, flags); 3583 } 3584 3585 static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone, 3586 NvmeZoneState state, NvmeRequest *req) 3587 { 3588 return nvme_zrm_close(ns, zone); 3589 } 3590 3591 static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone, 3592 NvmeZoneState state, NvmeRequest *req) 3593 { 3594 return nvme_zrm_finish(ns, zone); 3595 } 3596 3597 static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone, 3598 NvmeZoneState state, NvmeRequest *req) 3599 { 3600 switch (state) { 3601 case NVME_ZONE_STATE_READ_ONLY: 3602 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE); 3603 /* fall through */ 3604 case NVME_ZONE_STATE_OFFLINE: 3605 return NVME_SUCCESS; 3606 default: 3607 return NVME_ZONE_INVAL_TRANSITION; 3608 } 3609 } 3610 3611 static uint16_t nvme_set_zd_ext(NvmeNamespace *ns, NvmeZone *zone) 3612 { 3613 uint16_t status; 3614 uint8_t state = nvme_get_zone_state(zone); 3615 3616 if (state == NVME_ZONE_STATE_EMPTY) { 3617 status = nvme_aor_check(ns, 1, 0); 3618 if (status) { 3619 return status; 3620 } 3621 nvme_aor_inc_active(ns); 3622 zone->d.za |= NVME_ZA_ZD_EXT_VALID; 3623 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED); 3624 return NVME_SUCCESS; 3625 } 3626 3627 return NVME_ZONE_INVAL_TRANSITION; 3628 } 3629 3630 static uint16_t nvme_bulk_proc_zone(NvmeNamespace *ns, NvmeZone *zone, 3631 enum NvmeZoneProcessingMask proc_mask, 3632 
op_handler_t op_hndlr, NvmeRequest *req) 3633 { 3634 uint16_t status = NVME_SUCCESS; 3635 NvmeZoneState zs = nvme_get_zone_state(zone); 3636 bool proc_zone; 3637 3638 switch (zs) { 3639 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 3640 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 3641 proc_zone = proc_mask & NVME_PROC_OPENED_ZONES; 3642 break; 3643 case NVME_ZONE_STATE_CLOSED: 3644 proc_zone = proc_mask & NVME_PROC_CLOSED_ZONES; 3645 break; 3646 case NVME_ZONE_STATE_READ_ONLY: 3647 proc_zone = proc_mask & NVME_PROC_READ_ONLY_ZONES; 3648 break; 3649 case NVME_ZONE_STATE_FULL: 3650 proc_zone = proc_mask & NVME_PROC_FULL_ZONES; 3651 break; 3652 default: 3653 proc_zone = false; 3654 } 3655 3656 if (proc_zone) { 3657 status = op_hndlr(ns, zone, zs, req); 3658 } 3659 3660 return status; 3661 } 3662 3663 static uint16_t nvme_do_zone_op(NvmeNamespace *ns, NvmeZone *zone, 3664 enum NvmeZoneProcessingMask proc_mask, 3665 op_handler_t op_hndlr, NvmeRequest *req) 3666 { 3667 NvmeZone *next; 3668 uint16_t status = NVME_SUCCESS; 3669 int i; 3670 3671 if (!proc_mask) { 3672 status = op_hndlr(ns, zone, nvme_get_zone_state(zone), req); 3673 } else { 3674 if (proc_mask & NVME_PROC_CLOSED_ZONES) { 3675 QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) { 3676 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3677 req); 3678 if (status && status != NVME_NO_COMPLETE) { 3679 goto out; 3680 } 3681 } 3682 } 3683 if (proc_mask & NVME_PROC_OPENED_ZONES) { 3684 QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) { 3685 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3686 req); 3687 if (status && status != NVME_NO_COMPLETE) { 3688 goto out; 3689 } 3690 } 3691 3692 QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) { 3693 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3694 req); 3695 if (status && status != NVME_NO_COMPLETE) { 3696 goto out; 3697 } 3698 } 3699 } 3700 if (proc_mask & NVME_PROC_FULL_ZONES) { 3701 QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) { 3702 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3703 req); 3704 if (status && status != NVME_NO_COMPLETE) { 3705 goto out; 3706 } 3707 } 3708 } 3709 3710 if (proc_mask & NVME_PROC_READ_ONLY_ZONES) { 3711 for (i = 0; i < ns->num_zones; i++, zone++) { 3712 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3713 req); 3714 if (status && status != NVME_NO_COMPLETE) { 3715 goto out; 3716 } 3717 } 3718 } 3719 } 3720 3721 out: 3722 return status; 3723 } 3724 3725 typedef struct NvmeZoneResetAIOCB { 3726 BlockAIOCB common; 3727 BlockAIOCB *aiocb; 3728 NvmeRequest *req; 3729 QEMUBH *bh; 3730 int ret; 3731 3732 bool all; 3733 int idx; 3734 NvmeZone *zone; 3735 } NvmeZoneResetAIOCB; 3736 3737 static void nvme_zone_reset_cancel(BlockAIOCB *aiocb) 3738 { 3739 NvmeZoneResetAIOCB *iocb = container_of(aiocb, NvmeZoneResetAIOCB, common); 3740 NvmeRequest *req = iocb->req; 3741 NvmeNamespace *ns = req->ns; 3742 3743 iocb->idx = ns->num_zones; 3744 3745 iocb->ret = -ECANCELED; 3746 3747 if (iocb->aiocb) { 3748 blk_aio_cancel_async(iocb->aiocb); 3749 iocb->aiocb = NULL; 3750 } 3751 } 3752 3753 static const AIOCBInfo nvme_zone_reset_aiocb_info = { 3754 .aiocb_size = sizeof(NvmeZoneResetAIOCB), 3755 .cancel_async = nvme_zone_reset_cancel, 3756 }; 3757 3758 static void nvme_zone_reset_bh(void *opaque) 3759 { 3760 NvmeZoneResetAIOCB *iocb = opaque; 3761 3762 iocb->common.cb(iocb->common.opaque, iocb->ret); 3763 3764 qemu_bh_delete(iocb->bh); 3765 iocb->bh = NULL; 3766 qemu_aio_unref(iocb); 3767 } 3768 3769 
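/*
 * Zone Management Send with the Reset action is driven by a small AIO
 * callback chain rather than a single blocking operation. Roughly (see
 * nvme_zone_mgmt_send() for the entry point):
 *
 *   nvme_zone_mgmt_send(RESET)
 *     -> nvme_zone_reset_cb()             reset the state of the zone that
 *                                         just finished (if any), pick the
 *                                         next resettable zone and zero its
 *                                         data blocks
 *       -> nvme_zone_reset_epilogue_cb()  zero the zone's metadata region,
 *                                         if the LBA format carries metadata
 *         -> nvme_zone_reset_cb()         ... and so on, until no zone is
 *                                         left to process
 *   -> nvme_zone_reset_bh()               complete the request
 */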
static void nvme_zone_reset_cb(void *opaque, int ret); 3770 3771 static void nvme_zone_reset_epilogue_cb(void *opaque, int ret) 3772 { 3773 NvmeZoneResetAIOCB *iocb = opaque; 3774 NvmeRequest *req = iocb->req; 3775 NvmeNamespace *ns = req->ns; 3776 int64_t moff; 3777 int count; 3778 3779 if (ret < 0) { 3780 nvme_zone_reset_cb(iocb, ret); 3781 return; 3782 } 3783 3784 if (!ns->lbaf.ms) { 3785 nvme_zone_reset_cb(iocb, 0); 3786 return; 3787 } 3788 3789 moff = nvme_moff(ns, iocb->zone->d.zslba); 3790 count = nvme_m2b(ns, ns->zone_size); 3791 3792 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, moff, count, 3793 BDRV_REQ_MAY_UNMAP, 3794 nvme_zone_reset_cb, iocb); 3795 return; 3796 } 3797 3798 static void nvme_zone_reset_cb(void *opaque, int ret) 3799 { 3800 NvmeZoneResetAIOCB *iocb = opaque; 3801 NvmeRequest *req = iocb->req; 3802 NvmeNamespace *ns = req->ns; 3803 3804 if (ret < 0) { 3805 iocb->ret = ret; 3806 goto done; 3807 } 3808 3809 if (iocb->zone) { 3810 nvme_zrm_reset(ns, iocb->zone); 3811 3812 if (!iocb->all) { 3813 goto done; 3814 } 3815 } 3816 3817 while (iocb->idx < ns->num_zones) { 3818 NvmeZone *zone = &ns->zone_array[iocb->idx++]; 3819 3820 switch (nvme_get_zone_state(zone)) { 3821 case NVME_ZONE_STATE_EMPTY: 3822 if (!iocb->all) { 3823 goto done; 3824 } 3825 3826 continue; 3827 3828 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 3829 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 3830 case NVME_ZONE_STATE_CLOSED: 3831 case NVME_ZONE_STATE_FULL: 3832 iocb->zone = zone; 3833 break; 3834 3835 default: 3836 continue; 3837 } 3838 3839 trace_pci_nvme_zns_zone_reset(zone->d.zslba); 3840 3841 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, 3842 nvme_l2b(ns, zone->d.zslba), 3843 nvme_l2b(ns, ns->zone_size), 3844 BDRV_REQ_MAY_UNMAP, 3845 nvme_zone_reset_epilogue_cb, 3846 iocb); 3847 return; 3848 } 3849 3850 done: 3851 iocb->aiocb = NULL; 3852 if (iocb->bh) { 3853 qemu_bh_schedule(iocb->bh); 3854 } 3855 } 3856 3857 static uint16_t nvme_zone_mgmt_send_zrwa_flush(NvmeCtrl *n, NvmeZone *zone, 3858 uint64_t elba, NvmeRequest *req) 3859 { 3860 NvmeNamespace *ns = req->ns; 3861 uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); 3862 uint64_t wp = zone->d.wp; 3863 uint32_t nlb = elba - wp + 1; 3864 uint16_t status; 3865 3866 3867 if (!(ozcs & NVME_ID_NS_ZONED_OZCS_ZRWASUP)) { 3868 return NVME_INVALID_ZONE_OP | NVME_DNR; 3869 } 3870 3871 if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { 3872 return NVME_INVALID_FIELD | NVME_DNR; 3873 } 3874 3875 if (elba < wp || elba > wp + ns->zns.zrwas) { 3876 return NVME_ZONE_BOUNDARY_ERROR | NVME_DNR; 3877 } 3878 3879 if (nlb % ns->zns.zrwafg) { 3880 return NVME_INVALID_FIELD | NVME_DNR; 3881 } 3882 3883 status = nvme_zrm_auto(n, ns, zone); 3884 if (status) { 3885 return status; 3886 } 3887 3888 zone->w_ptr += nlb; 3889 3890 nvme_advance_zone_wp(ns, zone, nlb); 3891 3892 return NVME_SUCCESS; 3893 } 3894 3895 static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req) 3896 { 3897 NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; 3898 NvmeNamespace *ns = req->ns; 3899 NvmeZone *zone; 3900 NvmeZoneResetAIOCB *iocb; 3901 uint8_t *zd_ext; 3902 uint64_t slba = 0; 3903 uint32_t zone_idx = 0; 3904 uint16_t status; 3905 uint8_t action = cmd->zsa; 3906 bool all; 3907 enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE; 3908 3909 all = cmd->zsflags & NVME_ZSFLAG_SELECT_ALL; 3910 3911 req->status = NVME_SUCCESS; 3912 3913 if (!all) { 3914 status = nvme_get_mgmt_zone_slba_idx(ns, &req->cmd, &slba, &zone_idx); 3915 if (status) { 3916 return status; 3917 } 3918 } 3919 
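    /*
     * With the Select All flag set, slba and zone_idx stay at zero, so the
     * zone pointer below refers to the first zone; which zones are actually
     * processed is then decided per action by the proc_mask handed to
     * nvme_do_zone_op().
     */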
3920 zone = &ns->zone_array[zone_idx]; 3921 if (slba != zone->d.zslba && action != NVME_ZONE_ACTION_ZRWA_FLUSH) { 3922 trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba); 3923 return NVME_INVALID_FIELD | NVME_DNR; 3924 } 3925 3926 switch (action) { 3927 3928 case NVME_ZONE_ACTION_OPEN: 3929 if (all) { 3930 proc_mask = NVME_PROC_CLOSED_ZONES; 3931 } 3932 trace_pci_nvme_open_zone(slba, zone_idx, all); 3933 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_open_zone, req); 3934 break; 3935 3936 case NVME_ZONE_ACTION_CLOSE: 3937 if (all) { 3938 proc_mask = NVME_PROC_OPENED_ZONES; 3939 } 3940 trace_pci_nvme_close_zone(slba, zone_idx, all); 3941 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_close_zone, req); 3942 break; 3943 3944 case NVME_ZONE_ACTION_FINISH: 3945 if (all) { 3946 proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES; 3947 } 3948 trace_pci_nvme_finish_zone(slba, zone_idx, all); 3949 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_finish_zone, req); 3950 break; 3951 3952 case NVME_ZONE_ACTION_RESET: 3953 trace_pci_nvme_reset_zone(slba, zone_idx, all); 3954 3955 iocb = blk_aio_get(&nvme_zone_reset_aiocb_info, ns->blkconf.blk, 3956 nvme_misc_cb, req); 3957 3958 iocb->req = req; 3959 iocb->bh = qemu_bh_new(nvme_zone_reset_bh, iocb); 3960 iocb->ret = 0; 3961 iocb->all = all; 3962 iocb->idx = zone_idx; 3963 iocb->zone = NULL; 3964 3965 req->aiocb = &iocb->common; 3966 nvme_zone_reset_cb(iocb, 0); 3967 3968 return NVME_NO_COMPLETE; 3969 3970 case NVME_ZONE_ACTION_OFFLINE: 3971 if (all) { 3972 proc_mask = NVME_PROC_READ_ONLY_ZONES; 3973 } 3974 trace_pci_nvme_offline_zone(slba, zone_idx, all); 3975 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_offline_zone, req); 3976 break; 3977 3978 case NVME_ZONE_ACTION_SET_ZD_EXT: 3979 trace_pci_nvme_set_descriptor_extension(slba, zone_idx); 3980 if (all || !ns->params.zd_extension_size) { 3981 return NVME_INVALID_FIELD | NVME_DNR; 3982 } 3983 zd_ext = nvme_get_zd_extension(ns, zone_idx); 3984 status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req); 3985 if (status) { 3986 trace_pci_nvme_err_zd_extension_map_error(zone_idx); 3987 return status; 3988 } 3989 3990 status = nvme_set_zd_ext(ns, zone); 3991 if (status == NVME_SUCCESS) { 3992 trace_pci_nvme_zd_extension_set(zone_idx); 3993 return status; 3994 } 3995 break; 3996 3997 case NVME_ZONE_ACTION_ZRWA_FLUSH: 3998 if (all) { 3999 return NVME_INVALID_FIELD | NVME_DNR; 4000 } 4001 4002 return nvme_zone_mgmt_send_zrwa_flush(n, zone, slba, req); 4003 4004 default: 4005 trace_pci_nvme_err_invalid_mgmt_action(action); 4006 status = NVME_INVALID_FIELD; 4007 } 4008 4009 if (status == NVME_ZONE_INVAL_TRANSITION) { 4010 trace_pci_nvme_err_invalid_zone_state_transition(action, slba, 4011 zone->d.za); 4012 } 4013 if (status) { 4014 status |= NVME_DNR; 4015 } 4016 4017 return status; 4018 } 4019 4020 static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl) 4021 { 4022 NvmeZoneState zs = nvme_get_zone_state(zl); 4023 4024 switch (zafs) { 4025 case NVME_ZONE_REPORT_ALL: 4026 return true; 4027 case NVME_ZONE_REPORT_EMPTY: 4028 return zs == NVME_ZONE_STATE_EMPTY; 4029 case NVME_ZONE_REPORT_IMPLICITLY_OPEN: 4030 return zs == NVME_ZONE_STATE_IMPLICITLY_OPEN; 4031 case NVME_ZONE_REPORT_EXPLICITLY_OPEN: 4032 return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN; 4033 case NVME_ZONE_REPORT_CLOSED: 4034 return zs == NVME_ZONE_STATE_CLOSED; 4035 case NVME_ZONE_REPORT_FULL: 4036 return zs == NVME_ZONE_STATE_FULL; 4037 case NVME_ZONE_REPORT_READ_ONLY: 4038 return zs == 
NVME_ZONE_STATE_READ_ONLY; 4039 case NVME_ZONE_REPORT_OFFLINE: 4040 return zs == NVME_ZONE_STATE_OFFLINE; 4041 default: 4042 return false; 4043 } 4044 } 4045 4046 static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) 4047 { 4048 NvmeCmd *cmd = (NvmeCmd *)&req->cmd; 4049 NvmeNamespace *ns = req->ns; 4050 /* cdw12 is zero-based number of dwords to return. Convert to bytes */ 4051 uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2; 4052 uint32_t dw13 = le32_to_cpu(cmd->cdw13); 4053 uint32_t zone_idx, zra, zrasf, partial; 4054 uint64_t max_zones, nr_zones = 0; 4055 uint16_t status; 4056 uint64_t slba; 4057 NvmeZoneDescr *z; 4058 NvmeZone *zone; 4059 NvmeZoneReportHeader *header; 4060 void *buf, *buf_p; 4061 size_t zone_entry_sz; 4062 int i; 4063 4064 req->status = NVME_SUCCESS; 4065 4066 status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx); 4067 if (status) { 4068 return status; 4069 } 4070 4071 zra = dw13 & 0xff; 4072 if (zra != NVME_ZONE_REPORT && zra != NVME_ZONE_REPORT_EXTENDED) { 4073 return NVME_INVALID_FIELD | NVME_DNR; 4074 } 4075 if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) { 4076 return NVME_INVALID_FIELD | NVME_DNR; 4077 } 4078 4079 zrasf = (dw13 >> 8) & 0xff; 4080 if (zrasf > NVME_ZONE_REPORT_OFFLINE) { 4081 return NVME_INVALID_FIELD | NVME_DNR; 4082 } 4083 4084 if (data_size < sizeof(NvmeZoneReportHeader)) { 4085 return NVME_INVALID_FIELD | NVME_DNR; 4086 } 4087 4088 status = nvme_check_mdts(n, data_size); 4089 if (status) { 4090 return status; 4091 } 4092 4093 partial = (dw13 >> 16) & 0x01; 4094 4095 zone_entry_sz = sizeof(NvmeZoneDescr); 4096 if (zra == NVME_ZONE_REPORT_EXTENDED) { 4097 zone_entry_sz += ns->params.zd_extension_size; 4098 } 4099 4100 max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz; 4101 buf = g_malloc0(data_size); 4102 4103 zone = &ns->zone_array[zone_idx]; 4104 for (i = zone_idx; i < ns->num_zones; i++) { 4105 if (partial && nr_zones >= max_zones) { 4106 break; 4107 } 4108 if (nvme_zone_matches_filter(zrasf, zone++)) { 4109 nr_zones++; 4110 } 4111 } 4112 header = (NvmeZoneReportHeader *)buf; 4113 header->nr_zones = cpu_to_le64(nr_zones); 4114 4115 buf_p = buf + sizeof(NvmeZoneReportHeader); 4116 for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) { 4117 zone = &ns->zone_array[zone_idx]; 4118 if (nvme_zone_matches_filter(zrasf, zone)) { 4119 z = (NvmeZoneDescr *)buf_p; 4120 buf_p += sizeof(NvmeZoneDescr); 4121 4122 z->zt = zone->d.zt; 4123 z->zs = zone->d.zs; 4124 z->zcap = cpu_to_le64(zone->d.zcap); 4125 z->zslba = cpu_to_le64(zone->d.zslba); 4126 z->za = zone->d.za; 4127 4128 if (nvme_wp_is_valid(zone)) { 4129 z->wp = cpu_to_le64(zone->d.wp); 4130 } else { 4131 z->wp = cpu_to_le64(~0ULL); 4132 } 4133 4134 if (zra == NVME_ZONE_REPORT_EXTENDED) { 4135 if (zone->d.za & NVME_ZA_ZD_EXT_VALID) { 4136 memcpy(buf_p, nvme_get_zd_extension(ns, zone_idx), 4137 ns->params.zd_extension_size); 4138 } 4139 buf_p += ns->params.zd_extension_size; 4140 } 4141 4142 max_zones--; 4143 } 4144 } 4145 4146 status = nvme_c2h(n, (uint8_t *)buf, data_size, req); 4147 4148 g_free(buf); 4149 4150 return status; 4151 } 4152 4153 static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) 4154 { 4155 NvmeNamespace *ns; 4156 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 4157 4158 trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req), 4159 req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode)); 4160 4161 if (!nvme_nsid_valid(n, nsid)) { 4162 return NVME_INVALID_NSID | NVME_DNR; 4163 } 4164 4165 /* 4166 * In 
the base NVM command set, Flush may apply to all namespaces 4167 * (indicated by NSID being set to FFFFFFFFh). But if that feature is used 4168 * along with TP 4056 (Namespace Types), it may be pretty screwed up. 4169 * 4170 * If NSID is indeed set to FFFFFFFFh, we simply cannot associate the 4171 * opcode with a specific command since we cannot determine a unique I/O 4172 * command set. Opcode 0h could have any other meaning than something 4173 * equivalent to flushing and say it DOES have completely different 4174 * semantics in some other command set - does an NSID of FFFFFFFFh then 4175 * mean "for all namespaces, apply whatever command set specific command 4176 * that uses the 0h opcode?" Or does it mean "for all namespaces, apply 4177 * whatever command that uses the 0h opcode if, and only if, it allows NSID 4178 * to be FFFFFFFFh"? 4179 * 4180 * Anyway (and luckily), for now, we do not care about this since the 4181 * device only supports namespace types that includes the NVM Flush command 4182 * (NVM and Zoned), so always do an NVM Flush. 4183 */ 4184 if (req->cmd.opcode == NVME_CMD_FLUSH) { 4185 return nvme_flush(n, req); 4186 } 4187 4188 ns = nvme_ns(n, nsid); 4189 if (unlikely(!ns)) { 4190 return NVME_INVALID_FIELD | NVME_DNR; 4191 } 4192 4193 if (!(ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { 4194 trace_pci_nvme_err_invalid_opc(req->cmd.opcode); 4195 return NVME_INVALID_OPCODE | NVME_DNR; 4196 } 4197 4198 if (ns->status) { 4199 return ns->status; 4200 } 4201 4202 if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { 4203 return NVME_INVALID_FIELD; 4204 } 4205 4206 req->ns = ns; 4207 4208 switch (req->cmd.opcode) { 4209 case NVME_CMD_WRITE_ZEROES: 4210 return nvme_write_zeroes(n, req); 4211 case NVME_CMD_ZONE_APPEND: 4212 return nvme_zone_append(n, req); 4213 case NVME_CMD_WRITE: 4214 return nvme_write(n, req); 4215 case NVME_CMD_READ: 4216 return nvme_read(n, req); 4217 case NVME_CMD_COMPARE: 4218 return nvme_compare(n, req); 4219 case NVME_CMD_DSM: 4220 return nvme_dsm(n, req); 4221 case NVME_CMD_VERIFY: 4222 return nvme_verify(n, req); 4223 case NVME_CMD_COPY: 4224 return nvme_copy(n, req); 4225 case NVME_CMD_ZONE_MGMT_SEND: 4226 return nvme_zone_mgmt_send(n, req); 4227 case NVME_CMD_ZONE_MGMT_RECV: 4228 return nvme_zone_mgmt_recv(n, req); 4229 default: 4230 assert(false); 4231 } 4232 4233 return NVME_INVALID_OPCODE | NVME_DNR; 4234 } 4235 4236 static void nvme_cq_notifier(EventNotifier *e) 4237 { 4238 NvmeCQueue *cq = container_of(e, NvmeCQueue, notifier); 4239 NvmeCtrl *n = cq->ctrl; 4240 4241 event_notifier_test_and_clear(&cq->notifier); 4242 4243 nvme_update_cq_head(cq); 4244 4245 if (cq->tail == cq->head) { 4246 if (cq->irq_enabled) { 4247 n->cq_pending--; 4248 } 4249 4250 nvme_irq_deassert(n, cq); 4251 } 4252 4253 nvme_post_cqes(cq); 4254 } 4255 4256 static int nvme_init_cq_ioeventfd(NvmeCQueue *cq) 4257 { 4258 NvmeCtrl *n = cq->ctrl; 4259 uint16_t offset = (cq->cqid << 3) + (1 << 2); 4260 int ret; 4261 4262 ret = event_notifier_init(&cq->notifier, 0); 4263 if (ret < 0) { 4264 return ret; 4265 } 4266 4267 event_notifier_set_handler(&cq->notifier, nvme_cq_notifier); 4268 memory_region_add_eventfd(&n->iomem, 4269 0x1000 + offset, 4, false, 0, &cq->notifier); 4270 4271 return 0; 4272 } 4273 4274 static void nvme_sq_notifier(EventNotifier *e) 4275 { 4276 NvmeSQueue *sq = container_of(e, NvmeSQueue, notifier); 4277 4278 event_notifier_test_and_clear(&sq->notifier); 4279 4280 nvme_process_sq(sq); 4281 } 4282 4283 static int nvme_init_sq_ioeventfd(NvmeSQueue *sq) 4284 { 4285 
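    /*
     * Wire an eventfd to the submission queue tail doorbell so that a guest
     * doorbell write kicks nvme_sq_notifier() directly. With the 4-byte
     * doorbell stride used by this device, the doorbells start at BAR0
     * offset 1000h and interleave as follows (sketch of the layout assumed
     * here):
     *
     *   SQ y tail doorbell: 1000h + (2 * y) * 4     = 1000h + (y << 3)
     *   CQ y head doorbell: 1000h + (2 * y + 1) * 4 = 1000h + (y << 3) + 4
     *
     * which is why this function uses `sqid << 3` while
     * nvme_init_cq_ioeventfd() adds another 4 for the completion queue.
     */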
NvmeCtrl *n = sq->ctrl; 4286 uint16_t offset = sq->sqid << 3; 4287 int ret; 4288 4289 ret = event_notifier_init(&sq->notifier, 0); 4290 if (ret < 0) { 4291 return ret; 4292 } 4293 4294 event_notifier_set_handler(&sq->notifier, nvme_sq_notifier); 4295 memory_region_add_eventfd(&n->iomem, 4296 0x1000 + offset, 4, false, 0, &sq->notifier); 4297 4298 return 0; 4299 } 4300 4301 static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n) 4302 { 4303 uint16_t offset = sq->sqid << 3; 4304 4305 n->sq[sq->sqid] = NULL; 4306 timer_free(sq->timer); 4307 if (sq->ioeventfd_enabled) { 4308 memory_region_del_eventfd(&n->iomem, 4309 0x1000 + offset, 4, false, 0, &sq->notifier); 4310 event_notifier_cleanup(&sq->notifier); 4311 } 4312 g_free(sq->io_req); 4313 if (sq->sqid) { 4314 g_free(sq); 4315 } 4316 } 4317 4318 static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req) 4319 { 4320 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; 4321 NvmeRequest *r, *next; 4322 NvmeSQueue *sq; 4323 NvmeCQueue *cq; 4324 uint16_t qid = le16_to_cpu(c->qid); 4325 4326 if (unlikely(!qid || nvme_check_sqid(n, qid))) { 4327 trace_pci_nvme_err_invalid_del_sq(qid); 4328 return NVME_INVALID_QID | NVME_DNR; 4329 } 4330 4331 trace_pci_nvme_del_sq(qid); 4332 4333 sq = n->sq[qid]; 4334 while (!QTAILQ_EMPTY(&sq->out_req_list)) { 4335 r = QTAILQ_FIRST(&sq->out_req_list); 4336 assert(r->aiocb); 4337 blk_aio_cancel(r->aiocb); 4338 } 4339 4340 assert(QTAILQ_EMPTY(&sq->out_req_list)); 4341 4342 if (!nvme_check_cqid(n, sq->cqid)) { 4343 cq = n->cq[sq->cqid]; 4344 QTAILQ_REMOVE(&cq->sq_list, sq, entry); 4345 4346 nvme_post_cqes(cq); 4347 QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) { 4348 if (r->sq == sq) { 4349 QTAILQ_REMOVE(&cq->req_list, r, entry); 4350 QTAILQ_INSERT_TAIL(&sq->req_list, r, entry); 4351 } 4352 } 4353 } 4354 4355 nvme_free_sq(sq, n); 4356 return NVME_SUCCESS; 4357 } 4358 4359 static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr, 4360 uint16_t sqid, uint16_t cqid, uint16_t size) 4361 { 4362 int i; 4363 NvmeCQueue *cq; 4364 4365 sq->ctrl = n; 4366 sq->dma_addr = dma_addr; 4367 sq->sqid = sqid; 4368 sq->size = size; 4369 sq->cqid = cqid; 4370 sq->head = sq->tail = 0; 4371 sq->io_req = g_new0(NvmeRequest, sq->size); 4372 4373 QTAILQ_INIT(&sq->req_list); 4374 QTAILQ_INIT(&sq->out_req_list); 4375 for (i = 0; i < sq->size; i++) { 4376 sq->io_req[i].sq = sq; 4377 QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); 4378 } 4379 sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq); 4380 4381 if (n->dbbuf_enabled) { 4382 sq->db_addr = n->dbbuf_dbs + (sqid << 3); 4383 sq->ei_addr = n->dbbuf_eis + (sqid << 3); 4384 4385 if (n->params.ioeventfd && sq->sqid != 0) { 4386 if (!nvme_init_sq_ioeventfd(sq)) { 4387 sq->ioeventfd_enabled = true; 4388 } 4389 } 4390 } 4391 4392 assert(n->cq[cqid]); 4393 cq = n->cq[cqid]; 4394 QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry); 4395 n->sq[sqid] = sq; 4396 } 4397 4398 static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req) 4399 { 4400 NvmeSQueue *sq; 4401 NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd; 4402 4403 uint16_t cqid = le16_to_cpu(c->cqid); 4404 uint16_t sqid = le16_to_cpu(c->sqid); 4405 uint16_t qsize = le16_to_cpu(c->qsize); 4406 uint16_t qflags = le16_to_cpu(c->sq_flags); 4407 uint64_t prp1 = le64_to_cpu(c->prp1); 4408 4409 trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags); 4410 4411 if (unlikely(!cqid || nvme_check_cqid(n, cqid))) { 4412 trace_pci_nvme_err_invalid_create_sq_cqid(cqid); 4413 return NVME_INVALID_CQID | NVME_DNR; 4414 } 4415 if 
(unlikely(!sqid || sqid > n->conf_ioqpairs || n->sq[sqid] != NULL)) { 4416 trace_pci_nvme_err_invalid_create_sq_sqid(sqid); 4417 return NVME_INVALID_QID | NVME_DNR; 4418 } 4419 if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { 4420 trace_pci_nvme_err_invalid_create_sq_size(qsize); 4421 return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; 4422 } 4423 if (unlikely(prp1 & (n->page_size - 1))) { 4424 trace_pci_nvme_err_invalid_create_sq_addr(prp1); 4425 return NVME_INVALID_PRP_OFFSET | NVME_DNR; 4426 } 4427 if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) { 4428 trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags)); 4429 return NVME_INVALID_FIELD | NVME_DNR; 4430 } 4431 sq = g_malloc0(sizeof(*sq)); 4432 nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1); 4433 return NVME_SUCCESS; 4434 } 4435 4436 struct nvme_stats { 4437 uint64_t units_read; 4438 uint64_t units_written; 4439 uint64_t read_commands; 4440 uint64_t write_commands; 4441 }; 4442 4443 static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats) 4444 { 4445 BlockAcctStats *s = blk_get_stats(ns->blkconf.blk); 4446 4447 stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS; 4448 stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS; 4449 stats->read_commands += s->nr_ops[BLOCK_ACCT_READ]; 4450 stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE]; 4451 } 4452 4453 static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, 4454 uint64_t off, NvmeRequest *req) 4455 { 4456 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 4457 struct nvme_stats stats = { 0 }; 4458 NvmeSmartLog smart = { 0 }; 4459 uint32_t trans_len; 4460 NvmeNamespace *ns; 4461 time_t current_ms; 4462 4463 if (off >= sizeof(smart)) { 4464 return NVME_INVALID_FIELD | NVME_DNR; 4465 } 4466 4467 if (nsid != 0xffffffff) { 4468 ns = nvme_ns(n, nsid); 4469 if (!ns) { 4470 return NVME_INVALID_NSID | NVME_DNR; 4471 } 4472 nvme_set_blk_stats(ns, &stats); 4473 } else { 4474 int i; 4475 4476 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 4477 ns = nvme_ns(n, i); 4478 if (!ns) { 4479 continue; 4480 } 4481 nvme_set_blk_stats(ns, &stats); 4482 } 4483 } 4484 4485 trans_len = MIN(sizeof(smart) - off, buf_len); 4486 smart.critical_warning = n->smart_critical_warning; 4487 4488 smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read, 4489 1000)); 4490 smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written, 4491 1000)); 4492 smart.host_read_commands[0] = cpu_to_le64(stats.read_commands); 4493 smart.host_write_commands[0] = cpu_to_le64(stats.write_commands); 4494 4495 smart.temperature = cpu_to_le16(n->temperature); 4496 4497 if ((n->temperature >= n->features.temp_thresh_hi) || 4498 (n->temperature <= n->features.temp_thresh_low)) { 4499 smart.critical_warning |= NVME_SMART_TEMPERATURE; 4500 } 4501 4502 current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 4503 smart.power_on_hours[0] = 4504 cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60); 4505 4506 if (!rae) { 4507 nvme_clear_events(n, NVME_AER_TYPE_SMART); 4508 } 4509 4510 return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req); 4511 } 4512 4513 static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off, 4514 NvmeRequest *req) 4515 { 4516 uint32_t trans_len; 4517 NvmeFwSlotInfoLog fw_log = { 4518 .afi = 0x1, 4519 }; 4520 4521 if (off >= sizeof(fw_log)) { 4522 return NVME_INVALID_FIELD | NVME_DNR; 4523 } 4524 4525 strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' '); 4526 
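    /*
     * Get Log Page may read at an arbitrary dword-aligned offset into the
     * page, so clamp the transfer to whatever remains of the Firmware Slot
     * Information structure beyond 'off'; neither a non-zero offset nor a
     * short host buffer can read past its end.
     */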
trans_len = MIN(sizeof(fw_log) - off, buf_len); 4527 4528 return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req); 4529 } 4530 4531 static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, 4532 uint64_t off, NvmeRequest *req) 4533 { 4534 uint32_t trans_len; 4535 NvmeErrorLog errlog; 4536 4537 if (off >= sizeof(errlog)) { 4538 return NVME_INVALID_FIELD | NVME_DNR; 4539 } 4540 4541 if (!rae) { 4542 nvme_clear_events(n, NVME_AER_TYPE_ERROR); 4543 } 4544 4545 memset(&errlog, 0x0, sizeof(errlog)); 4546 trans_len = MIN(sizeof(errlog) - off, buf_len); 4547 4548 return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req); 4549 } 4550 4551 static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, 4552 uint64_t off, NvmeRequest *req) 4553 { 4554 uint32_t nslist[1024]; 4555 uint32_t trans_len; 4556 int i = 0; 4557 uint32_t nsid; 4558 4559 if (off >= sizeof(nslist)) { 4560 trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(nslist)); 4561 return NVME_INVALID_FIELD | NVME_DNR; 4562 } 4563 4564 memset(nslist, 0x0, sizeof(nslist)); 4565 trans_len = MIN(sizeof(nslist) - off, buf_len); 4566 4567 while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) != 4568 NVME_CHANGED_NSID_SIZE) { 4569 /* 4570 * If more than 1024 namespaces have changed, the first entry in the log 4571 * page is set to FFFFFFFFh and the remaining entries to 0, as required by the spec. 4572 */ 4573 if (i == ARRAY_SIZE(nslist)) { 4574 memset(nslist, 0x0, sizeof(nslist)); 4575 nslist[0] = 0xffffffff; 4576 break; 4577 } 4578 4579 nslist[i++] = nsid; 4580 clear_bit(nsid, n->changed_nsids); 4581 } 4582 4583 /* 4584 * Clear all remaining changed-namespace bits when we broke out early because 4585 * more than 1024 namespaces have changed. 4586 */ 4587 if (nslist[0] == 0xffffffff) { 4588 bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE); 4589 } 4590 4591 if (!rae) { 4592 nvme_clear_events(n, NVME_AER_TYPE_NOTICE); 4593 } 4594 4595 return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req); 4596 } 4597 4598 static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, 4599 uint64_t off, NvmeRequest *req) 4600 { 4601 NvmeEffectsLog log = {}; 4602 const uint32_t *src_iocs = NULL; 4603 uint32_t trans_len; 4604 4605 if (off >= sizeof(log)) { 4606 trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log)); 4607 return NVME_INVALID_FIELD | NVME_DNR; 4608 } 4609 4610 switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) { 4611 case NVME_CC_CSS_NVM: 4612 src_iocs = nvme_cse_iocs_nvm; 4613 /* fall through */ 4614 case NVME_CC_CSS_ADMIN_ONLY: 4615 break; 4616 case NVME_CC_CSS_CSI: 4617 switch (csi) { 4618 case NVME_CSI_NVM: 4619 src_iocs = nvme_cse_iocs_nvm; 4620 break; 4621 case NVME_CSI_ZONED: 4622 src_iocs = nvme_cse_iocs_zoned; 4623 break; 4624 } 4625 } 4626 4627 memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs)); 4628 4629 if (src_iocs) { 4630 memcpy(log.iocs, src_iocs, sizeof(log.iocs)); 4631 } 4632 4633 trans_len = MIN(sizeof(log) - off, buf_len); 4634 4635 return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req); 4636 } 4637 4638 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) 4639 { 4640 NvmeCmd *cmd = &req->cmd; 4641 4642 uint32_t dw10 = le32_to_cpu(cmd->cdw10); 4643 uint32_t dw11 = le32_to_cpu(cmd->cdw11); 4644 uint32_t dw12 = le32_to_cpu(cmd->cdw12); 4645 uint32_t dw13 = le32_to_cpu(cmd->cdw13); 4646 uint8_t lid = dw10 & 0xff; 4647 uint8_t lsp = (dw10 >> 8) & 0xf; 4648 uint8_t rae = (dw10 >> 15) & 0x1; 4649 uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24; 4650 uint32_t numdl, numdu; 4651 uint64_t off, lpol,
lpou; 4652 size_t len; 4653 uint16_t status; 4654 4655 numdl = (dw10 >> 16); 4656 numdu = (dw11 & 0xffff); 4657 lpol = dw12; 4658 lpou = dw13; 4659 4660 len = (((numdu << 16) | numdl) + 1) << 2; 4661 off = (lpou << 32ULL) | lpol; 4662 4663 if (off & 0x3) { 4664 return NVME_INVALID_FIELD | NVME_DNR; 4665 } 4666 4667 trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off); 4668 4669 status = nvme_check_mdts(n, len); 4670 if (status) { 4671 return status; 4672 } 4673 4674 switch (lid) { 4675 case NVME_LOG_ERROR_INFO: 4676 return nvme_error_info(n, rae, len, off, req); 4677 case NVME_LOG_SMART_INFO: 4678 return nvme_smart_info(n, rae, len, off, req); 4679 case NVME_LOG_FW_SLOT_INFO: 4680 return nvme_fw_log_info(n, len, off, req); 4681 case NVME_LOG_CHANGED_NSLIST: 4682 return nvme_changed_nslist(n, rae, len, off, req); 4683 case NVME_LOG_CMD_EFFECTS: 4684 return nvme_cmd_effects(n, csi, len, off, req); 4685 default: 4686 trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid); 4687 return NVME_INVALID_FIELD | NVME_DNR; 4688 } 4689 } 4690 4691 static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n) 4692 { 4693 uint16_t offset = (cq->cqid << 3) + (1 << 2); 4694 4695 n->cq[cq->cqid] = NULL; 4696 timer_free(cq->timer); 4697 if (cq->ioeventfd_enabled) { 4698 memory_region_del_eventfd(&n->iomem, 4699 0x1000 + offset, 4, false, 0, &cq->notifier); 4700 event_notifier_cleanup(&cq->notifier); 4701 } 4702 if (msix_enabled(&n->parent_obj)) { 4703 msix_vector_unuse(&n->parent_obj, cq->vector); 4704 } 4705 if (cq->cqid) { 4706 g_free(cq); 4707 } 4708 } 4709 4710 static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req) 4711 { 4712 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; 4713 NvmeCQueue *cq; 4714 uint16_t qid = le16_to_cpu(c->qid); 4715 4716 if (unlikely(!qid || nvme_check_cqid(n, qid))) { 4717 trace_pci_nvme_err_invalid_del_cq_cqid(qid); 4718 return NVME_INVALID_CQID | NVME_DNR; 4719 } 4720 4721 cq = n->cq[qid]; 4722 if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) { 4723 trace_pci_nvme_err_invalid_del_cq_notempty(qid); 4724 return NVME_INVALID_QUEUE_DEL; 4725 } 4726 4727 if (cq->irq_enabled && cq->tail != cq->head) { 4728 n->cq_pending--; 4729 } 4730 4731 nvme_irq_deassert(n, cq); 4732 trace_pci_nvme_del_cq(qid); 4733 nvme_free_cq(cq, n); 4734 return NVME_SUCCESS; 4735 } 4736 4737 static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, 4738 uint16_t cqid, uint16_t vector, uint16_t size, 4739 uint16_t irq_enabled) 4740 { 4741 int ret; 4742 4743 if (msix_enabled(&n->parent_obj)) { 4744 ret = msix_vector_use(&n->parent_obj, vector); 4745 assert(ret == 0); 4746 } 4747 cq->ctrl = n; 4748 cq->cqid = cqid; 4749 cq->size = size; 4750 cq->dma_addr = dma_addr; 4751 cq->phase = 1; 4752 cq->irq_enabled = irq_enabled; 4753 cq->vector = vector; 4754 cq->head = cq->tail = 0; 4755 QTAILQ_INIT(&cq->req_list); 4756 QTAILQ_INIT(&cq->sq_list); 4757 if (n->dbbuf_enabled) { 4758 cq->db_addr = n->dbbuf_dbs + (cqid << 3) + (1 << 2); 4759 cq->ei_addr = n->dbbuf_eis + (cqid << 3) + (1 << 2); 4760 4761 if (n->params.ioeventfd && cqid != 0) { 4762 if (!nvme_init_cq_ioeventfd(cq)) { 4763 cq->ioeventfd_enabled = true; 4764 } 4765 } 4766 } 4767 n->cq[cqid] = cq; 4768 cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq); 4769 } 4770 4771 static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) 4772 { 4773 NvmeCQueue *cq; 4774 NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd; 4775 uint16_t cqid = le16_to_cpu(c->cqid); 4776 uint16_t vector = le16_to_cpu(c->irq_vector); 4777 uint16_t qsize = 
le16_to_cpu(c->qsize); 4778 uint16_t qflags = le16_to_cpu(c->cq_flags); 4779 uint64_t prp1 = le64_to_cpu(c->prp1); 4780 4781 trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags, 4782 NVME_CQ_FLAGS_IEN(qflags) != 0); 4783 4784 if (unlikely(!cqid || cqid > n->conf_ioqpairs || n->cq[cqid] != NULL)) { 4785 trace_pci_nvme_err_invalid_create_cq_cqid(cqid); 4786 return NVME_INVALID_QID | NVME_DNR; 4787 } 4788 if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { 4789 trace_pci_nvme_err_invalid_create_cq_size(qsize); 4790 return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; 4791 } 4792 if (unlikely(prp1 & (n->page_size - 1))) { 4793 trace_pci_nvme_err_invalid_create_cq_addr(prp1); 4794 return NVME_INVALID_PRP_OFFSET | NVME_DNR; 4795 } 4796 if (unlikely(!msix_enabled(&n->parent_obj) && vector)) { 4797 trace_pci_nvme_err_invalid_create_cq_vector(vector); 4798 return NVME_INVALID_IRQ_VECTOR | NVME_DNR; 4799 } 4800 if (unlikely(vector >= n->conf_msix_qsize)) { 4801 trace_pci_nvme_err_invalid_create_cq_vector(vector); 4802 return NVME_INVALID_IRQ_VECTOR | NVME_DNR; 4803 } 4804 if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) { 4805 trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags)); 4806 return NVME_INVALID_FIELD | NVME_DNR; 4807 } 4808 4809 cq = g_malloc0(sizeof(*cq)); 4810 nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1, 4811 NVME_CQ_FLAGS_IEN(qflags)); 4812 4813 /* 4814 * It is only required to set qs_created when creating a completion queue; 4815 * creating a submission queue without a matching completion queue will 4816 * fail. 4817 */ 4818 n->qs_created = true; 4819 return NVME_SUCCESS; 4820 } 4821 4822 static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req) 4823 { 4824 uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {}; 4825 4826 return nvme_c2h(n, id, sizeof(id), req); 4827 } 4828 4829 static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req) 4830 { 4831 trace_pci_nvme_identify_ctrl(); 4832 4833 return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req); 4834 } 4835 4836 static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req) 4837 { 4838 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 4839 uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {}; 4840 NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id; 4841 4842 trace_pci_nvme_identify_ctrl_csi(c->csi); 4843 4844 switch (c->csi) { 4845 case NVME_CSI_NVM: 4846 id_nvm->vsl = n->params.vsl; 4847 id_nvm->dmrsl = cpu_to_le32(n->dmrsl); 4848 break; 4849 4850 case NVME_CSI_ZONED: 4851 ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl; 4852 break; 4853 4854 default: 4855 return NVME_INVALID_FIELD | NVME_DNR; 4856 } 4857 4858 return nvme_c2h(n, id, sizeof(id), req); 4859 } 4860 4861 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active) 4862 { 4863 NvmeNamespace *ns; 4864 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 4865 uint32_t nsid = le32_to_cpu(c->nsid); 4866 4867 trace_pci_nvme_identify_ns(nsid); 4868 4869 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { 4870 return NVME_INVALID_NSID | NVME_DNR; 4871 } 4872 4873 ns = nvme_ns(n, nsid); 4874 if (unlikely(!ns)) { 4875 if (!active) { 4876 ns = nvme_subsys_ns(n->subsys, nsid); 4877 if (!ns) { 4878 return nvme_rpt_empty_id_struct(n, req); 4879 } 4880 } else { 4881 return nvme_rpt_empty_id_struct(n, req); 4882 } 4883 } 4884 4885 if (active || ns->csi == NVME_CSI_NVM) { 4886 return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req); 4887 } 4888 4889 return NVME_INVALID_CMD_SET | NVME_DNR; 4890 } 4891 4892 static uint16_t 
nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req, 4893 bool attached) 4894 { 4895 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 4896 uint32_t nsid = le32_to_cpu(c->nsid); 4897 uint16_t min_id = le16_to_cpu(c->ctrlid); 4898 uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {}; 4899 uint16_t *ids = &list[1]; 4900 NvmeNamespace *ns; 4901 NvmeCtrl *ctrl; 4902 int cntlid, nr_ids = 0; 4903 4904 trace_pci_nvme_identify_ctrl_list(c->cns, min_id); 4905 4906 if (!n->subsys) { 4907 return NVME_INVALID_FIELD | NVME_DNR; 4908 } 4909 4910 if (attached) { 4911 if (nsid == NVME_NSID_BROADCAST) { 4912 return NVME_INVALID_FIELD | NVME_DNR; 4913 } 4914 4915 ns = nvme_subsys_ns(n->subsys, nsid); 4916 if (!ns) { 4917 return NVME_INVALID_FIELD | NVME_DNR; 4918 } 4919 } 4920 4921 for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) { 4922 ctrl = nvme_subsys_ctrl(n->subsys, cntlid); 4923 if (!ctrl) { 4924 continue; 4925 } 4926 4927 if (attached && !nvme_ns(ctrl, nsid)) { 4928 continue; 4929 } 4930 4931 ids[nr_ids++] = cntlid; 4932 } 4933 4934 list[0] = nr_ids; 4935 4936 return nvme_c2h(n, (uint8_t *)list, sizeof(list), req); 4937 } 4938 4939 static uint16_t nvme_identify_pri_ctrl_cap(NvmeCtrl *n, NvmeRequest *req) 4940 { 4941 trace_pci_nvme_identify_pri_ctrl_cap(le16_to_cpu(n->pri_ctrl_cap.cntlid)); 4942 4943 return nvme_c2h(n, (uint8_t *)&n->pri_ctrl_cap, 4944 sizeof(NvmePriCtrlCap), req); 4945 } 4946 4947 static uint16_t nvme_identify_sec_ctrl_list(NvmeCtrl *n, NvmeRequest *req) 4948 { 4949 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 4950 uint16_t pri_ctrl_id = le16_to_cpu(n->pri_ctrl_cap.cntlid); 4951 uint16_t min_id = le16_to_cpu(c->ctrlid); 4952 uint8_t num_sec_ctrl = n->sec_ctrl_list.numcntl; 4953 NvmeSecCtrlList list = {0}; 4954 uint8_t i; 4955 4956 for (i = 0; i < num_sec_ctrl; i++) { 4957 if (n->sec_ctrl_list.sec[i].scid >= min_id) { 4958 list.numcntl = num_sec_ctrl - i; 4959 memcpy(&list.sec, n->sec_ctrl_list.sec + i, 4960 list.numcntl * sizeof(NvmeSecCtrlEntry)); 4961 break; 4962 } 4963 } 4964 4965 trace_pci_nvme_identify_sec_ctrl_list(pri_ctrl_id, list.numcntl); 4966 4967 return nvme_c2h(n, (uint8_t *)&list, sizeof(list), req); 4968 } 4969 4970 static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req, 4971 bool active) 4972 { 4973 NvmeNamespace *ns; 4974 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 4975 uint32_t nsid = le32_to_cpu(c->nsid); 4976 4977 trace_pci_nvme_identify_ns_csi(nsid, c->csi); 4978 4979 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { 4980 return NVME_INVALID_NSID | NVME_DNR; 4981 } 4982 4983 ns = nvme_ns(n, nsid); 4984 if (unlikely(!ns)) { 4985 if (!active) { 4986 ns = nvme_subsys_ns(n->subsys, nsid); 4987 if (!ns) { 4988 return nvme_rpt_empty_id_struct(n, req); 4989 } 4990 } else { 4991 return nvme_rpt_empty_id_struct(n, req); 4992 } 4993 } 4994 4995 if (c->csi == NVME_CSI_NVM) { 4996 return nvme_c2h(n, (uint8_t *)&ns->id_ns_nvm, sizeof(NvmeIdNsNvm), 4997 req); 4998 } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) { 4999 return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned), 5000 req); 5001 } 5002 5003 return NVME_INVALID_FIELD | NVME_DNR; 5004 } 5005 5006 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req, 5007 bool active) 5008 { 5009 NvmeNamespace *ns; 5010 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5011 uint32_t min_nsid = le32_to_cpu(c->nsid); 5012 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; 5013 static const int data_len = sizeof(list); 5014 uint32_t *list_ptr = (uint32_t *)list; 5015 
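    /*
     * list_ptr views the Identify data buffer as an array of 32-bit NSIDs:
     * data_len / sizeof(uint32_t) entries (1024 for the 4096-byte Identify
     * payload), filled below in ascending NSID order; unused slots stay
     * zero.
     */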
int i, j = 0; 5016 5017 trace_pci_nvme_identify_nslist(min_nsid); 5018 5019 /* 5020 * Both FFFFFFFFh (NVME_NSID_BROADCAST) and FFFFFFFFEh are invalid values 5021 * since the Active Namespace ID List should return namespaces with ids 5022 * *higher* than the NSID specified in the command. This is also specified 5023 * in the spec (NVM Express v1.3d, Section 5.15.4). 5024 */ 5025 if (min_nsid >= NVME_NSID_BROADCAST - 1) { 5026 return NVME_INVALID_NSID | NVME_DNR; 5027 } 5028 5029 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 5030 ns = nvme_ns(n, i); 5031 if (!ns) { 5032 if (!active) { 5033 ns = nvme_subsys_ns(n->subsys, i); 5034 if (!ns) { 5035 continue; 5036 } 5037 } else { 5038 continue; 5039 } 5040 } 5041 if (ns->params.nsid <= min_nsid) { 5042 continue; 5043 } 5044 list_ptr[j++] = cpu_to_le32(ns->params.nsid); 5045 if (j == data_len / sizeof(uint32_t)) { 5046 break; 5047 } 5048 } 5049 5050 return nvme_c2h(n, list, data_len, req); 5051 } 5052 5053 static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req, 5054 bool active) 5055 { 5056 NvmeNamespace *ns; 5057 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5058 uint32_t min_nsid = le32_to_cpu(c->nsid); 5059 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; 5060 static const int data_len = sizeof(list); 5061 uint32_t *list_ptr = (uint32_t *)list; 5062 int i, j = 0; 5063 5064 trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi); 5065 5066 /* 5067 * Same as in nvme_identify_nslist(), FFFFFFFFh/FFFFFFFFEh are invalid. 5068 */ 5069 if (min_nsid >= NVME_NSID_BROADCAST - 1) { 5070 return NVME_INVALID_NSID | NVME_DNR; 5071 } 5072 5073 if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) { 5074 return NVME_INVALID_FIELD | NVME_DNR; 5075 } 5076 5077 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 5078 ns = nvme_ns(n, i); 5079 if (!ns) { 5080 if (!active) { 5081 ns = nvme_subsys_ns(n->subsys, i); 5082 if (!ns) { 5083 continue; 5084 } 5085 } else { 5086 continue; 5087 } 5088 } 5089 if (ns->params.nsid <= min_nsid || c->csi != ns->csi) { 5090 continue; 5091 } 5092 list_ptr[j++] = cpu_to_le32(ns->params.nsid); 5093 if (j == data_len / sizeof(uint32_t)) { 5094 break; 5095 } 5096 } 5097 5098 return nvme_c2h(n, list, data_len, req); 5099 } 5100 5101 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) 5102 { 5103 NvmeNamespace *ns; 5104 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5105 uint32_t nsid = le32_to_cpu(c->nsid); 5106 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; 5107 uint8_t *pos = list; 5108 struct { 5109 NvmeIdNsDescr hdr; 5110 uint8_t v[NVME_NIDL_UUID]; 5111 } QEMU_PACKED uuid = {}; 5112 struct { 5113 NvmeIdNsDescr hdr; 5114 uint64_t v; 5115 } QEMU_PACKED eui64 = {}; 5116 struct { 5117 NvmeIdNsDescr hdr; 5118 uint8_t v; 5119 } QEMU_PACKED csi = {}; 5120 5121 trace_pci_nvme_identify_ns_descr_list(nsid); 5122 5123 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { 5124 return NVME_INVALID_NSID | NVME_DNR; 5125 } 5126 5127 ns = nvme_ns(n, nsid); 5128 if (unlikely(!ns)) { 5129 return NVME_INVALID_FIELD | NVME_DNR; 5130 } 5131 5132 if (!qemu_uuid_is_null(&ns->params.uuid)) { 5133 uuid.hdr.nidt = NVME_NIDT_UUID; 5134 uuid.hdr.nidl = NVME_NIDL_UUID; 5135 memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID); 5136 memcpy(pos, &uuid, sizeof(uuid)); 5137 pos += sizeof(uuid); 5138 } 5139 5140 if (ns->params.eui64) { 5141 eui64.hdr.nidt = NVME_NIDT_EUI64; 5142 eui64.hdr.nidl = NVME_NIDL_EUI64; 5143 eui64.v = cpu_to_be64(ns->params.eui64); 5144 memcpy(pos, &eui64, sizeof(eui64)); 5145 pos += sizeof(eui64); 5146 } 5147 
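    /*
     * The UUID and EUI-64 descriptors above are only emitted when the
     * namespace was configured with them; the CSI descriptor that follows is
     * always present. The remainder of the buffer stays zeroed, which also
     * acts as the descriptor list terminator.
     */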
5148 csi.hdr.nidt = NVME_NIDT_CSI; 5149 csi.hdr.nidl = NVME_NIDL_CSI; 5150 csi.v = ns->csi; 5151 memcpy(pos, &csi, sizeof(csi)); 5152 pos += sizeof(csi); 5153 5154 return nvme_c2h(n, list, sizeof(list), req); 5155 } 5156 5157 static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req) 5158 { 5159 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; 5160 static const int data_len = sizeof(list); 5161 5162 trace_pci_nvme_identify_cmd_set(); 5163 5164 NVME_SET_CSI(*list, NVME_CSI_NVM); 5165 NVME_SET_CSI(*list, NVME_CSI_ZONED); 5166 5167 return nvme_c2h(n, list, data_len, req); 5168 } 5169 5170 static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req) 5171 { 5172 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5173 5174 trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid), 5175 c->csi); 5176 5177 switch (c->cns) { 5178 case NVME_ID_CNS_NS: 5179 return nvme_identify_ns(n, req, true); 5180 case NVME_ID_CNS_NS_PRESENT: 5181 return nvme_identify_ns(n, req, false); 5182 case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST: 5183 return nvme_identify_ctrl_list(n, req, true); 5184 case NVME_ID_CNS_CTRL_LIST: 5185 return nvme_identify_ctrl_list(n, req, false); 5186 case NVME_ID_CNS_PRIMARY_CTRL_CAP: 5187 return nvme_identify_pri_ctrl_cap(n, req); 5188 case NVME_ID_CNS_SECONDARY_CTRL_LIST: 5189 return nvme_identify_sec_ctrl_list(n, req); 5190 case NVME_ID_CNS_CS_NS: 5191 return nvme_identify_ns_csi(n, req, true); 5192 case NVME_ID_CNS_CS_NS_PRESENT: 5193 return nvme_identify_ns_csi(n, req, false); 5194 case NVME_ID_CNS_CTRL: 5195 return nvme_identify_ctrl(n, req); 5196 case NVME_ID_CNS_CS_CTRL: 5197 return nvme_identify_ctrl_csi(n, req); 5198 case NVME_ID_CNS_NS_ACTIVE_LIST: 5199 return nvme_identify_nslist(n, req, true); 5200 case NVME_ID_CNS_NS_PRESENT_LIST: 5201 return nvme_identify_nslist(n, req, false); 5202 case NVME_ID_CNS_CS_NS_ACTIVE_LIST: 5203 return nvme_identify_nslist_csi(n, req, true); 5204 case NVME_ID_CNS_CS_NS_PRESENT_LIST: 5205 return nvme_identify_nslist_csi(n, req, false); 5206 case NVME_ID_CNS_NS_DESCR_LIST: 5207 return nvme_identify_ns_descr_list(n, req); 5208 case NVME_ID_CNS_IO_COMMAND_SET: 5209 return nvme_identify_cmd_set(n, req); 5210 default: 5211 trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns)); 5212 return NVME_INVALID_FIELD | NVME_DNR; 5213 } 5214 } 5215 5216 static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req) 5217 { 5218 uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff; 5219 5220 req->cqe.result = 1; 5221 if (nvme_check_sqid(n, sqid)) { 5222 return NVME_INVALID_FIELD | NVME_DNR; 5223 } 5224 5225 return NVME_SUCCESS; 5226 } 5227 5228 static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts) 5229 { 5230 trace_pci_nvme_setfeat_timestamp(ts); 5231 5232 n->host_timestamp = le64_to_cpu(ts); 5233 n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 5234 } 5235 5236 static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n) 5237 { 5238 uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 5239 uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms; 5240 5241 union nvme_timestamp { 5242 struct { 5243 uint64_t timestamp:48; 5244 uint64_t sync:1; 5245 uint64_t origin:3; 5246 uint64_t rsvd1:12; 5247 }; 5248 uint64_t all; 5249 }; 5250 5251 union nvme_timestamp ts; 5252 ts.all = 0; 5253 ts.timestamp = n->host_timestamp + elapsed_time; 5254 5255 /* If the host timestamp is non-zero, set the timestamp origin */ 5256 ts.origin = n->host_timestamp ? 
0x01 : 0x00; 5257 5258 trace_pci_nvme_getfeat_timestamp(ts.all); 5259 5260 return cpu_to_le64(ts.all); 5261 } 5262 5263 static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) 5264 { 5265 uint64_t timestamp = nvme_get_timestamp(n); 5266 5267 return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req); 5268 } 5269 5270 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) 5271 { 5272 NvmeCmd *cmd = &req->cmd; 5273 uint32_t dw10 = le32_to_cpu(cmd->cdw10); 5274 uint32_t dw11 = le32_to_cpu(cmd->cdw11); 5275 uint32_t nsid = le32_to_cpu(cmd->nsid); 5276 uint32_t result; 5277 uint8_t fid = NVME_GETSETFEAT_FID(dw10); 5278 NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10); 5279 uint16_t iv; 5280 NvmeNamespace *ns; 5281 int i; 5282 5283 static const uint32_t nvme_feature_default[NVME_FID_MAX] = { 5284 [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT, 5285 }; 5286 5287 trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11); 5288 5289 if (!nvme_feature_support[fid]) { 5290 return NVME_INVALID_FIELD | NVME_DNR; 5291 } 5292 5293 if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { 5294 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { 5295 /* 5296 * The Reservation Notification Mask and Reservation Persistence 5297 * features require a status code of Invalid Field in Command when 5298 * NSID is FFFFFFFFh. Since the device does not support those 5299 * features we can always return Invalid Namespace or Format as we 5300 * should do for all other features. 5301 */ 5302 return NVME_INVALID_NSID | NVME_DNR; 5303 } 5304 5305 if (!nvme_ns(n, nsid)) { 5306 return NVME_INVALID_FIELD | NVME_DNR; 5307 } 5308 } 5309 5310 switch (sel) { 5311 case NVME_GETFEAT_SELECT_CURRENT: 5312 break; 5313 case NVME_GETFEAT_SELECT_SAVED: 5314 /* no features are saveable by the controller; fallthrough */ 5315 case NVME_GETFEAT_SELECT_DEFAULT: 5316 goto defaults; 5317 case NVME_GETFEAT_SELECT_CAP: 5318 result = nvme_feature_cap[fid]; 5319 goto out; 5320 } 5321 5322 switch (fid) { 5323 case NVME_TEMPERATURE_THRESHOLD: 5324 result = 0; 5325 5326 /* 5327 * The controller only implements the Composite Temperature sensor, so 5328 * return 0 for all other sensors. 5329 */ 5330 if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { 5331 goto out; 5332 } 5333 5334 switch (NVME_TEMP_THSEL(dw11)) { 5335 case NVME_TEMP_THSEL_OVER: 5336 result = n->features.temp_thresh_hi; 5337 goto out; 5338 case NVME_TEMP_THSEL_UNDER: 5339 result = n->features.temp_thresh_low; 5340 goto out; 5341 } 5342 5343 return NVME_INVALID_FIELD | NVME_DNR; 5344 case NVME_ERROR_RECOVERY: 5345 if (!nvme_nsid_valid(n, nsid)) { 5346 return NVME_INVALID_NSID | NVME_DNR; 5347 } 5348 5349 ns = nvme_ns(n, nsid); 5350 if (unlikely(!ns)) { 5351 return NVME_INVALID_FIELD | NVME_DNR; 5352 } 5353 5354 result = ns->features.err_rec; 5355 goto out; 5356 case NVME_VOLATILE_WRITE_CACHE: 5357 result = 0; 5358 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 5359 ns = nvme_ns(n, i); 5360 if (!ns) { 5361 continue; 5362 } 5363 5364 result = blk_enable_write_cache(ns->blkconf.blk); 5365 if (result) { 5366 break; 5367 } 5368 } 5369 trace_pci_nvme_getfeat_vwcache(result ?
"enabled" : "disabled"); 5370 goto out; 5371 case NVME_ASYNCHRONOUS_EVENT_CONF: 5372 result = n->features.async_config; 5373 goto out; 5374 case NVME_TIMESTAMP: 5375 return nvme_get_feature_timestamp(n, req); 5376 case NVME_HOST_BEHAVIOR_SUPPORT: 5377 return nvme_c2h(n, (uint8_t *)&n->features.hbs, 5378 sizeof(n->features.hbs), req); 5379 default: 5380 break; 5381 } 5382 5383 defaults: 5384 switch (fid) { 5385 case NVME_TEMPERATURE_THRESHOLD: 5386 result = 0; 5387 5388 if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { 5389 break; 5390 } 5391 5392 if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) { 5393 result = NVME_TEMPERATURE_WARNING; 5394 } 5395 5396 break; 5397 case NVME_NUMBER_OF_QUEUES: 5398 result = (n->conf_ioqpairs - 1) | ((n->conf_ioqpairs - 1) << 16); 5399 trace_pci_nvme_getfeat_numq(result); 5400 break; 5401 case NVME_INTERRUPT_VECTOR_CONF: 5402 iv = dw11 & 0xffff; 5403 if (iv >= n->conf_ioqpairs + 1) { 5404 return NVME_INVALID_FIELD | NVME_DNR; 5405 } 5406 5407 result = iv; 5408 if (iv == n->admin_cq.vector) { 5409 result |= NVME_INTVC_NOCOALESCING; 5410 } 5411 break; 5412 default: 5413 result = nvme_feature_default[fid]; 5414 break; 5415 } 5416 5417 out: 5418 req->cqe.result = cpu_to_le32(result); 5419 return NVME_SUCCESS; 5420 } 5421 5422 static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) 5423 { 5424 uint16_t ret; 5425 uint64_t timestamp; 5426 5427 ret = nvme_h2c(n, (uint8_t *)×tamp, sizeof(timestamp), req); 5428 if (ret) { 5429 return ret; 5430 } 5431 5432 nvme_set_timestamp(n, timestamp); 5433 5434 return NVME_SUCCESS; 5435 } 5436 5437 static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) 5438 { 5439 NvmeNamespace *ns = NULL; 5440 5441 NvmeCmd *cmd = &req->cmd; 5442 uint32_t dw10 = le32_to_cpu(cmd->cdw10); 5443 uint32_t dw11 = le32_to_cpu(cmd->cdw11); 5444 uint32_t nsid = le32_to_cpu(cmd->nsid); 5445 uint8_t fid = NVME_GETSETFEAT_FID(dw10); 5446 uint8_t save = NVME_SETFEAT_SAVE(dw10); 5447 uint16_t status; 5448 int i; 5449 5450 trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11); 5451 5452 if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) { 5453 return NVME_FID_NOT_SAVEABLE | NVME_DNR; 5454 } 5455 5456 if (!nvme_feature_support[fid]) { 5457 return NVME_INVALID_FIELD | NVME_DNR; 5458 } 5459 5460 if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { 5461 if (nsid != NVME_NSID_BROADCAST) { 5462 if (!nvme_nsid_valid(n, nsid)) { 5463 return NVME_INVALID_NSID | NVME_DNR; 5464 } 5465 5466 ns = nvme_ns(n, nsid); 5467 if (unlikely(!ns)) { 5468 return NVME_INVALID_FIELD | NVME_DNR; 5469 } 5470 } 5471 } else if (nsid && nsid != NVME_NSID_BROADCAST) { 5472 if (!nvme_nsid_valid(n, nsid)) { 5473 return NVME_INVALID_NSID | NVME_DNR; 5474 } 5475 5476 return NVME_FEAT_NOT_NS_SPEC | NVME_DNR; 5477 } 5478 5479 if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) { 5480 return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; 5481 } 5482 5483 switch (fid) { 5484 case NVME_TEMPERATURE_THRESHOLD: 5485 if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { 5486 break; 5487 } 5488 5489 switch (NVME_TEMP_THSEL(dw11)) { 5490 case NVME_TEMP_THSEL_OVER: 5491 n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11); 5492 break; 5493 case NVME_TEMP_THSEL_UNDER: 5494 n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11); 5495 break; 5496 default: 5497 return NVME_INVALID_FIELD | NVME_DNR; 5498 } 5499 5500 if ((n->temperature >= n->features.temp_thresh_hi) || 5501 (n->temperature <= n->features.temp_thresh_low)) { 5502 nvme_smart_event(n, NVME_SMART_TEMPERATURE); 
5503 } 5504 5505 break; 5506 case NVME_ERROR_RECOVERY: 5507 if (nsid == NVME_NSID_BROADCAST) { 5508 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 5509 ns = nvme_ns(n, i); 5510 5511 if (!ns) { 5512 continue; 5513 } 5514 5515 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { 5516 ns->features.err_rec = dw11; 5517 } 5518 } 5519 5520 break; 5521 } 5522 5523 assert(ns); 5524 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { 5525 ns->features.err_rec = dw11; 5526 } 5527 break; 5528 case NVME_VOLATILE_WRITE_CACHE: 5529 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 5530 ns = nvme_ns(n, i); 5531 if (!ns) { 5532 continue; 5533 } 5534 5535 if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) { 5536 blk_flush(ns->blkconf.blk); 5537 } 5538 5539 blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1); 5540 } 5541 5542 break; 5543 5544 case NVME_NUMBER_OF_QUEUES: 5545 if (n->qs_created) { 5546 return NVME_CMD_SEQ_ERROR | NVME_DNR; 5547 } 5548 5549 /* 5550 * NVMe v1.3, Section 5.21.1.7: FFFFh is not an allowed value for NCQR 5551 * and NSQR. 5552 */ 5553 if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) { 5554 return NVME_INVALID_FIELD | NVME_DNR; 5555 } 5556 5557 trace_pci_nvme_setfeat_numq((dw11 & 0xffff) + 1, 5558 ((dw11 >> 16) & 0xffff) + 1, 5559 n->conf_ioqpairs, 5560 n->conf_ioqpairs); 5561 req->cqe.result = cpu_to_le32((n->conf_ioqpairs - 1) | 5562 ((n->conf_ioqpairs - 1) << 16)); 5563 break; 5564 case NVME_ASYNCHRONOUS_EVENT_CONF: 5565 n->features.async_config = dw11; 5566 break; 5567 case NVME_TIMESTAMP: 5568 return nvme_set_feature_timestamp(n, req); 5569 case NVME_HOST_BEHAVIOR_SUPPORT: 5570 status = nvme_h2c(n, (uint8_t *)&n->features.hbs, 5571 sizeof(n->features.hbs), req); 5572 if (status) { 5573 return status; 5574 } 5575 5576 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 5577 ns = nvme_ns(n, i); 5578 5579 if (!ns) { 5580 continue; 5581 } 5582 5583 ns->id_ns.nlbaf = ns->nlbaf - 1; 5584 if (!n->features.hbs.lbafee) { 5585 ns->id_ns.nlbaf = MIN(ns->id_ns.nlbaf, 15); 5586 } 5587 } 5588 5589 return status; 5590 case NVME_COMMAND_SET_PROFILE: 5591 if (dw11 & 0x1ff) { 5592 trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff); 5593 return NVME_CMD_SET_CMB_REJECTED | NVME_DNR; 5594 } 5595 break; 5596 default: 5597 return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; 5598 } 5599 return NVME_SUCCESS; 5600 } 5601 5602 static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req) 5603 { 5604 trace_pci_nvme_aer(nvme_cid(req)); 5605 5606 if (n->outstanding_aers > n->params.aerl) { 5607 trace_pci_nvme_aer_aerl_exceeded(); 5608 return NVME_AER_LIMIT_EXCEEDED; 5609 } 5610 5611 n->aer_reqs[n->outstanding_aers] = req; 5612 n->outstanding_aers++; 5613 5614 if (!QTAILQ_EMPTY(&n->aer_queue)) { 5615 nvme_process_aers(n); 5616 } 5617 5618 return NVME_NO_COMPLETE; 5619 } 5620 5621 static void nvme_update_dmrsl(NvmeCtrl *n) 5622 { 5623 int nsid; 5624 5625 for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) { 5626 NvmeNamespace *ns = nvme_ns(n, nsid); 5627 if (!ns) { 5628 continue; 5629 } 5630 5631 n->dmrsl = MIN_NON_ZERO(n->dmrsl, 5632 BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); 5633 } 5634 } 5635 5636 static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns) 5637 { 5638 uint32_t cc = ldl_le_p(&n->bar.cc); 5639 5640 ns->iocs = nvme_cse_iocs_none; 5641 switch (ns->csi) { 5642 case NVME_CSI_NVM: 5643 if (NVME_CC_CSS(cc) != NVME_CC_CSS_ADMIN_ONLY) { 5644 ns->iocs = nvme_cse_iocs_nvm; 5645 } 5646 break; 5647 case NVME_CSI_ZONED: 5648 if (NVME_CC_CSS(cc) == NVME_CC_CSS_CSI) { 5649 ns->iocs = nvme_cse_iocs_zoned; 
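            /*
             * Zone-specific opcodes are only exposed when CC.CSS selects
             * "all supported I/O command sets"; with the NVM command set
             * selected the namespace is limited to the plain NVM opcodes
             * (see the branch below).
             */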
5650 } else if (NVME_CC_CSS(cc) == NVME_CC_CSS_NVM) { 5651 ns->iocs = nvme_cse_iocs_nvm; 5652 } 5653 break; 5654 } 5655 } 5656 5657 static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) 5658 { 5659 NvmeNamespace *ns; 5660 NvmeCtrl *ctrl; 5661 uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {}; 5662 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 5663 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); 5664 uint8_t sel = dw10 & 0xf; 5665 uint16_t *nr_ids = &list[0]; 5666 uint16_t *ids = &list[1]; 5667 uint16_t ret; 5668 int i; 5669 5670 trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf); 5671 5672 if (!nvme_nsid_valid(n, nsid)) { 5673 return NVME_INVALID_NSID | NVME_DNR; 5674 } 5675 5676 ns = nvme_subsys_ns(n->subsys, nsid); 5677 if (!ns) { 5678 return NVME_INVALID_FIELD | NVME_DNR; 5679 } 5680 5681 ret = nvme_h2c(n, (uint8_t *)list, 4096, req); 5682 if (ret) { 5683 return ret; 5684 } 5685 5686 if (!*nr_ids) { 5687 return NVME_NS_CTRL_LIST_INVALID | NVME_DNR; 5688 } 5689 5690 *nr_ids = MIN(*nr_ids, NVME_CONTROLLER_LIST_SIZE - 1); 5691 for (i = 0; i < *nr_ids; i++) { 5692 ctrl = nvme_subsys_ctrl(n->subsys, ids[i]); 5693 if (!ctrl) { 5694 return NVME_NS_CTRL_LIST_INVALID | NVME_DNR; 5695 } 5696 5697 switch (sel) { 5698 case NVME_NS_ATTACHMENT_ATTACH: 5699 if (nvme_ns(ctrl, nsid)) { 5700 return NVME_NS_ALREADY_ATTACHED | NVME_DNR; 5701 } 5702 5703 if (ns->attached && !ns->params.shared) { 5704 return NVME_NS_PRIVATE | NVME_DNR; 5705 } 5706 5707 nvme_attach_ns(ctrl, ns); 5708 nvme_select_iocs_ns(ctrl, ns); 5709 5710 break; 5711 5712 case NVME_NS_ATTACHMENT_DETACH: 5713 if (!nvme_ns(ctrl, nsid)) { 5714 return NVME_NS_NOT_ATTACHED | NVME_DNR; 5715 } 5716 5717 ctrl->namespaces[nsid] = NULL; 5718 ns->attached--; 5719 5720 nvme_update_dmrsl(ctrl); 5721 5722 break; 5723 5724 default: 5725 return NVME_INVALID_FIELD | NVME_DNR; 5726 } 5727 5728 /* 5729 * Add namespace id to the changed namespace id list for event clearing 5730 * via Get Log Page command. 
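         * The bit set here is cleared again when the host reads the Changed
         * Namespace List log page (see nvme_changed_nslist()); if that read
         * has RAE cleared it also clears the pending NOTICE event.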
5731 */ 5732 if (!test_and_set_bit(nsid, ctrl->changed_nsids)) { 5733 nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE, 5734 NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED, 5735 NVME_LOG_CHANGED_NSLIST); 5736 } 5737 } 5738 5739 return NVME_SUCCESS; 5740 } 5741 5742 typedef struct NvmeFormatAIOCB { 5743 BlockAIOCB common; 5744 BlockAIOCB *aiocb; 5745 QEMUBH *bh; 5746 NvmeRequest *req; 5747 int ret; 5748 5749 NvmeNamespace *ns; 5750 uint32_t nsid; 5751 bool broadcast; 5752 int64_t offset; 5753 5754 uint8_t lbaf; 5755 uint8_t mset; 5756 uint8_t pi; 5757 uint8_t pil; 5758 } NvmeFormatAIOCB; 5759 5760 static void nvme_format_bh(void *opaque); 5761 5762 static void nvme_format_cancel(BlockAIOCB *aiocb) 5763 { 5764 NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common); 5765 5766 if (iocb->aiocb) { 5767 blk_aio_cancel_async(iocb->aiocb); 5768 } 5769 } 5770 5771 static const AIOCBInfo nvme_format_aiocb_info = { 5772 .aiocb_size = sizeof(NvmeFormatAIOCB), 5773 .cancel_async = nvme_format_cancel, 5774 .get_aio_context = nvme_get_aio_context, 5775 }; 5776 5777 static void nvme_format_set(NvmeNamespace *ns, uint8_t lbaf, uint8_t mset, 5778 uint8_t pi, uint8_t pil) 5779 { 5780 uint8_t lbafl = lbaf & 0xf; 5781 uint8_t lbafu = lbaf >> 4; 5782 5783 trace_pci_nvme_format_set(ns->params.nsid, lbaf, mset, pi, pil); 5784 5785 ns->id_ns.dps = (pil << 3) | pi; 5786 ns->id_ns.flbas = (lbafu << 5) | (mset << 4) | lbafl; 5787 5788 nvme_ns_init_format(ns); 5789 } 5790 5791 static void nvme_format_ns_cb(void *opaque, int ret) 5792 { 5793 NvmeFormatAIOCB *iocb = opaque; 5794 NvmeNamespace *ns = iocb->ns; 5795 int bytes; 5796 5797 if (ret < 0) { 5798 iocb->ret = ret; 5799 goto done; 5800 } 5801 5802 assert(ns); 5803 5804 if (iocb->offset < ns->size) { 5805 bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset); 5806 5807 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset, 5808 bytes, BDRV_REQ_MAY_UNMAP, 5809 nvme_format_ns_cb, iocb); 5810 5811 iocb->offset += bytes; 5812 return; 5813 } 5814 5815 nvme_format_set(ns, iocb->lbaf, iocb->mset, iocb->pi, iocb->pil); 5816 ns->status = 0x0; 5817 iocb->ns = NULL; 5818 iocb->offset = 0; 5819 5820 done: 5821 iocb->aiocb = NULL; 5822 qemu_bh_schedule(iocb->bh); 5823 } 5824 5825 static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi) 5826 { 5827 if (ns->params.zoned) { 5828 return NVME_INVALID_FORMAT | NVME_DNR; 5829 } 5830 5831 if (lbaf > ns->id_ns.nlbaf) { 5832 return NVME_INVALID_FORMAT | NVME_DNR; 5833 } 5834 5835 if (pi && (ns->id_ns.lbaf[lbaf].ms < nvme_pi_tuple_size(ns))) { 5836 return NVME_INVALID_FORMAT | NVME_DNR; 5837 } 5838 5839 if (pi && pi > NVME_ID_NS_DPS_TYPE_3) { 5840 return NVME_INVALID_FIELD | NVME_DNR; 5841 } 5842 5843 return NVME_SUCCESS; 5844 } 5845 5846 static void nvme_format_bh(void *opaque) 5847 { 5848 NvmeFormatAIOCB *iocb = opaque; 5849 NvmeRequest *req = iocb->req; 5850 NvmeCtrl *n = nvme_ctrl(req); 5851 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); 5852 uint8_t lbaf = dw10 & 0xf; 5853 uint8_t pi = (dw10 >> 5) & 0x7; 5854 uint16_t status; 5855 int i; 5856 5857 if (iocb->ret < 0) { 5858 goto done; 5859 } 5860 5861 if (iocb->broadcast) { 5862 for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { 5863 iocb->ns = nvme_ns(n, i); 5864 if (iocb->ns) { 5865 iocb->nsid = i; 5866 break; 5867 } 5868 } 5869 } 5870 5871 if (!iocb->ns) { 5872 goto done; 5873 } 5874 5875 status = nvme_format_check(iocb->ns, lbaf, pi); 5876 if (status) { 5877 req->status = status; 5878 goto done; 5879 } 5880 5881 iocb->ns->status = 
NVME_FORMAT_IN_PROGRESS; 5882 nvme_format_ns_cb(iocb, 0); 5883 return; 5884 5885 done: 5886 qemu_bh_delete(iocb->bh); 5887 iocb->bh = NULL; 5888 5889 iocb->common.cb(iocb->common.opaque, iocb->ret); 5890 5891 qemu_aio_unref(iocb); 5892 } 5893 5894 static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req) 5895 { 5896 NvmeFormatAIOCB *iocb; 5897 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 5898 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); 5899 uint8_t lbaf = dw10 & 0xf; 5900 uint8_t mset = (dw10 >> 4) & 0x1; 5901 uint8_t pi = (dw10 >> 5) & 0x7; 5902 uint8_t pil = (dw10 >> 8) & 0x1; 5903 uint8_t lbafu = (dw10 >> 12) & 0x3; 5904 uint16_t status; 5905 5906 iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req); 5907 5908 iocb->req = req; 5909 iocb->bh = qemu_bh_new(nvme_format_bh, iocb); 5910 iocb->ret = 0; 5911 iocb->ns = NULL; 5912 iocb->nsid = 0; 5913 iocb->lbaf = lbaf; 5914 iocb->mset = mset; 5915 iocb->pi = pi; 5916 iocb->pil = pil; 5917 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); 5918 iocb->offset = 0; 5919 5920 if (n->features.hbs.lbafee) { 5921 iocb->lbaf |= lbafu << 4; 5922 } 5923 5924 if (!iocb->broadcast) { 5925 if (!nvme_nsid_valid(n, nsid)) { 5926 status = NVME_INVALID_NSID | NVME_DNR; 5927 goto out; 5928 } 5929 5930 iocb->ns = nvme_ns(n, nsid); 5931 if (!iocb->ns) { 5932 status = NVME_INVALID_FIELD | NVME_DNR; 5933 goto out; 5934 } 5935 } 5936 5937 req->aiocb = &iocb->common; 5938 qemu_bh_schedule(iocb->bh); 5939 5940 return NVME_NO_COMPLETE; 5941 5942 out: 5943 qemu_bh_delete(iocb->bh); 5944 iocb->bh = NULL; 5945 qemu_aio_unref(iocb); 5946 return status; 5947 } 5948 5949 static void nvme_get_virt_res_num(NvmeCtrl *n, uint8_t rt, int *num_total, 5950 int *num_prim, int *num_sec) 5951 { 5952 *num_total = le32_to_cpu(rt ? 5953 n->pri_ctrl_cap.vifrt : n->pri_ctrl_cap.vqfrt); 5954 *num_prim = le16_to_cpu(rt ? 5955 n->pri_ctrl_cap.virfap : n->pri_ctrl_cap.vqrfap); 5956 *num_sec = le16_to_cpu(rt ? 
n->pri_ctrl_cap.virfa : n->pri_ctrl_cap.vqrfa); 5957 } 5958 5959 static uint16_t nvme_assign_virt_res_to_prim(NvmeCtrl *n, NvmeRequest *req, 5960 uint16_t cntlid, uint8_t rt, 5961 int nr) 5962 { 5963 int num_total, num_prim, num_sec; 5964 5965 if (cntlid != n->cntlid) { 5966 return NVME_INVALID_CTRL_ID | NVME_DNR; 5967 } 5968 5969 nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec); 5970 5971 if (nr > num_total) { 5972 return NVME_INVALID_NUM_RESOURCES | NVME_DNR; 5973 } 5974 5975 if (nr > num_total - num_sec) { 5976 return NVME_INVALID_RESOURCE_ID | NVME_DNR; 5977 } 5978 5979 if (rt) { 5980 n->next_pri_ctrl_cap.virfap = cpu_to_le16(nr); 5981 } else { 5982 n->next_pri_ctrl_cap.vqrfap = cpu_to_le16(nr); 5983 } 5984 5985 req->cqe.result = cpu_to_le32(nr); 5986 return req->status; 5987 } 5988 5989 static void nvme_update_virt_res(NvmeCtrl *n, NvmeSecCtrlEntry *sctrl, 5990 uint8_t rt, int nr) 5991 { 5992 int prev_nr, prev_total; 5993 5994 if (rt) { 5995 prev_nr = le16_to_cpu(sctrl->nvi); 5996 prev_total = le32_to_cpu(n->pri_ctrl_cap.virfa); 5997 sctrl->nvi = cpu_to_le16(nr); 5998 n->pri_ctrl_cap.virfa = cpu_to_le32(prev_total + nr - prev_nr); 5999 } else { 6000 prev_nr = le16_to_cpu(sctrl->nvq); 6001 prev_total = le32_to_cpu(n->pri_ctrl_cap.vqrfa); 6002 sctrl->nvq = cpu_to_le16(nr); 6003 n->pri_ctrl_cap.vqrfa = cpu_to_le32(prev_total + nr - prev_nr); 6004 } 6005 } 6006 6007 static uint16_t nvme_assign_virt_res_to_sec(NvmeCtrl *n, NvmeRequest *req, 6008 uint16_t cntlid, uint8_t rt, int nr) 6009 { 6010 int num_total, num_prim, num_sec, num_free, diff, limit; 6011 NvmeSecCtrlEntry *sctrl; 6012 6013 sctrl = nvme_sctrl_for_cntlid(n, cntlid); 6014 if (!sctrl) { 6015 return NVME_INVALID_CTRL_ID | NVME_DNR; 6016 } 6017 6018 if (sctrl->scs) { 6019 return NVME_INVALID_SEC_CTRL_STATE | NVME_DNR; 6020 } 6021 6022 limit = le16_to_cpu(rt ? n->pri_ctrl_cap.vifrsm : n->pri_ctrl_cap.vqfrsm); 6023 if (nr > limit) { 6024 return NVME_INVALID_NUM_RESOURCES | NVME_DNR; 6025 } 6026 6027 nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec); 6028 num_free = num_total - num_prim - num_sec; 6029 diff = nr - le16_to_cpu(rt ? 
sctrl->nvi : sctrl->nvq); 6030 6031 if (diff > num_free) { 6032 return NVME_INVALID_RESOURCE_ID | NVME_DNR; 6033 } 6034 6035 nvme_update_virt_res(n, sctrl, rt, nr); 6036 req->cqe.result = cpu_to_le32(nr); 6037 6038 return req->status; 6039 } 6040 6041 static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online) 6042 { 6043 NvmeCtrl *sn = NULL; 6044 NvmeSecCtrlEntry *sctrl; 6045 int vf_index; 6046 6047 sctrl = nvme_sctrl_for_cntlid(n, cntlid); 6048 if (!sctrl) { 6049 return NVME_INVALID_CTRL_ID | NVME_DNR; 6050 } 6051 6052 if (!pci_is_vf(&n->parent_obj)) { 6053 vf_index = le16_to_cpu(sctrl->vfn) - 1; 6054 sn = NVME(pcie_sriov_get_vf_at_index(&n->parent_obj, vf_index)); 6055 } 6056 6057 if (online) { 6058 if (!sctrl->nvi || (le16_to_cpu(sctrl->nvq) < 2) || !sn) { 6059 return NVME_INVALID_SEC_CTRL_STATE | NVME_DNR; 6060 } 6061 6062 if (!sctrl->scs) { 6063 sctrl->scs = 0x1; 6064 nvme_ctrl_reset(sn, NVME_RESET_FUNCTION); 6065 } 6066 } else { 6067 nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_INTERRUPT, 0); 6068 nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_QUEUE, 0); 6069 6070 if (sctrl->scs) { 6071 sctrl->scs = 0x0; 6072 if (sn) { 6073 nvme_ctrl_reset(sn, NVME_RESET_FUNCTION); 6074 } 6075 } 6076 } 6077 6078 return NVME_SUCCESS; 6079 } 6080 6081 static uint16_t nvme_virt_mngmt(NvmeCtrl *n, NvmeRequest *req) 6082 { 6083 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); 6084 uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); 6085 uint8_t act = dw10 & 0xf; 6086 uint8_t rt = (dw10 >> 8) & 0x7; 6087 uint16_t cntlid = (dw10 >> 16) & 0xffff; 6088 int nr = dw11 & 0xffff; 6089 6090 trace_pci_nvme_virt_mngmt(nvme_cid(req), act, cntlid, rt ? "VI" : "VQ", nr); 6091 6092 if (rt != NVME_VIRT_RES_QUEUE && rt != NVME_VIRT_RES_INTERRUPT) { 6093 return NVME_INVALID_RESOURCE_ID | NVME_DNR; 6094 } 6095 6096 switch (act) { 6097 case NVME_VIRT_MNGMT_ACTION_SEC_ASSIGN: 6098 return nvme_assign_virt_res_to_sec(n, req, cntlid, rt, nr); 6099 case NVME_VIRT_MNGMT_ACTION_PRM_ALLOC: 6100 return nvme_assign_virt_res_to_prim(n, req, cntlid, rt, nr); 6101 case NVME_VIRT_MNGMT_ACTION_SEC_ONLINE: 6102 return nvme_virt_set_state(n, cntlid, true); 6103 case NVME_VIRT_MNGMT_ACTION_SEC_OFFLINE: 6104 return nvme_virt_set_state(n, cntlid, false); 6105 default: 6106 return NVME_INVALID_FIELD | NVME_DNR; 6107 } 6108 } 6109 6110 static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req) 6111 { 6112 uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1); 6113 uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2); 6114 int i; 6115 6116 /* Address should be page aligned */ 6117 if (dbs_addr & (n->page_size - 1) || eis_addr & (n->page_size - 1)) { 6118 return NVME_INVALID_FIELD | NVME_DNR; 6119 } 6120 6121 /* Save shadow buffer base addr for use during queue creation */ 6122 n->dbbuf_dbs = dbs_addr; 6123 n->dbbuf_eis = eis_addr; 6124 n->dbbuf_enabled = true; 6125 6126 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { 6127 NvmeSQueue *sq = n->sq[i]; 6128 NvmeCQueue *cq = n->cq[i]; 6129 6130 if (sq) { 6131 /* 6132 * CAP.DSTRD is 0, so offset of ith sq db_addr is (i<<3) 6133 * nvme_process_db() uses this hard-coded way to calculate 6134 * doorbell offsets. Be consistent with that here. 
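             * For example, with CAP.DSTRD == 0 the shadow doorbell for SQ 2
             * lives at dbs_addr + 0x10 and its EventIdx at eis_addr + 0x10,
             * while CQ 2 uses dbs_addr + 0x14 and eis_addr + 0x14.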
6135 */ 6136 sq->db_addr = dbs_addr + (i << 3); 6137 sq->ei_addr = eis_addr + (i << 3); 6138 pci_dma_write(&n->parent_obj, sq->db_addr, &sq->tail, 6139 sizeof(sq->tail)); 6140 6141 if (n->params.ioeventfd && sq->sqid != 0) { 6142 if (!nvme_init_sq_ioeventfd(sq)) { 6143 sq->ioeventfd_enabled = true; 6144 } 6145 } 6146 } 6147 6148 if (cq) { 6149 /* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */ 6150 cq->db_addr = dbs_addr + (i << 3) + (1 << 2); 6151 cq->ei_addr = eis_addr + (i << 3) + (1 << 2); 6152 pci_dma_write(&n->parent_obj, cq->db_addr, &cq->head, 6153 sizeof(cq->head)); 6154 6155 if (n->params.ioeventfd && cq->cqid != 0) { 6156 if (!nvme_init_cq_ioeventfd(cq)) { 6157 cq->ioeventfd_enabled = true; 6158 } 6159 } 6160 } 6161 } 6162 6163 trace_pci_nvme_dbbuf_config(dbs_addr, eis_addr); 6164 6165 return NVME_SUCCESS; 6166 } 6167 6168 static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) 6169 { 6170 trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, 6171 nvme_adm_opc_str(req->cmd.opcode)); 6172 6173 if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { 6174 trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode); 6175 return NVME_INVALID_OPCODE | NVME_DNR; 6176 } 6177 6178 /* SGLs shall not be used for Admin commands in NVMe over PCIe */ 6179 if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) { 6180 return NVME_INVALID_FIELD | NVME_DNR; 6181 } 6182 6183 if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { 6184 return NVME_INVALID_FIELD; 6185 } 6186 6187 switch (req->cmd.opcode) { 6188 case NVME_ADM_CMD_DELETE_SQ: 6189 return nvme_del_sq(n, req); 6190 case NVME_ADM_CMD_CREATE_SQ: 6191 return nvme_create_sq(n, req); 6192 case NVME_ADM_CMD_GET_LOG_PAGE: 6193 return nvme_get_log(n, req); 6194 case NVME_ADM_CMD_DELETE_CQ: 6195 return nvme_del_cq(n, req); 6196 case NVME_ADM_CMD_CREATE_CQ: 6197 return nvme_create_cq(n, req); 6198 case NVME_ADM_CMD_IDENTIFY: 6199 return nvme_identify(n, req); 6200 case NVME_ADM_CMD_ABORT: 6201 return nvme_abort(n, req); 6202 case NVME_ADM_CMD_SET_FEATURES: 6203 return nvme_set_feature(n, req); 6204 case NVME_ADM_CMD_GET_FEATURES: 6205 return nvme_get_feature(n, req); 6206 case NVME_ADM_CMD_ASYNC_EV_REQ: 6207 return nvme_aer(n, req); 6208 case NVME_ADM_CMD_NS_ATTACHMENT: 6209 return nvme_ns_attachment(n, req); 6210 case NVME_ADM_CMD_VIRT_MNGMT: 6211 return nvme_virt_mngmt(n, req); 6212 case NVME_ADM_CMD_DBBUF_CONFIG: 6213 return nvme_dbbuf_config(n, req); 6214 case NVME_ADM_CMD_FORMAT_NVM: 6215 return nvme_format(n, req); 6216 default: 6217 assert(false); 6218 } 6219 6220 return NVME_INVALID_OPCODE | NVME_DNR; 6221 } 6222 6223 static void nvme_update_sq_eventidx(const NvmeSQueue *sq) 6224 { 6225 pci_dma_write(&sq->ctrl->parent_obj, sq->ei_addr, &sq->tail, 6226 sizeof(sq->tail)); 6227 trace_pci_nvme_eventidx_sq(sq->sqid, sq->tail); 6228 } 6229 6230 static void nvme_update_sq_tail(NvmeSQueue *sq) 6231 { 6232 pci_dma_read(&sq->ctrl->parent_obj, sq->db_addr, &sq->tail, 6233 sizeof(sq->tail)); 6234 trace_pci_nvme_shadow_doorbell_sq(sq->sqid, sq->tail); 6235 } 6236 6237 static void nvme_process_sq(void *opaque) 6238 { 6239 NvmeSQueue *sq = opaque; 6240 NvmeCtrl *n = sq->ctrl; 6241 NvmeCQueue *cq = n->cq[sq->cqid]; 6242 6243 uint16_t status; 6244 hwaddr addr; 6245 NvmeCmd cmd; 6246 NvmeRequest *req; 6247 6248 if (n->dbbuf_enabled) { 6249 nvme_update_sq_tail(sq); 6250 } 6251 6252 while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) { 6253 addr = sq->dma_addr + sq->head * n->sqe_size; 6254 if (nvme_addr_read(n, addr, 
(void *)&cmd, sizeof(cmd))) { 6255 trace_pci_nvme_err_addr_read(addr); 6256 trace_pci_nvme_err_cfs(); 6257 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); 6258 break; 6259 } 6260 nvme_inc_sq_head(sq); 6261 6262 req = QTAILQ_FIRST(&sq->req_list); 6263 QTAILQ_REMOVE(&sq->req_list, req, entry); 6264 QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry); 6265 nvme_req_clear(req); 6266 req->cqe.cid = cmd.cid; 6267 memcpy(&req->cmd, &cmd, sizeof(NvmeCmd)); 6268 6269 status = sq->sqid ? nvme_io_cmd(n, req) : 6270 nvme_admin_cmd(n, req); 6271 if (status != NVME_NO_COMPLETE) { 6272 req->status = status; 6273 nvme_enqueue_req_completion(cq, req); 6274 } 6275 6276 if (n->dbbuf_enabled) { 6277 nvme_update_sq_eventidx(sq); 6278 nvme_update_sq_tail(sq); 6279 } 6280 } 6281 } 6282 6283 static void nvme_update_msixcap_ts(PCIDevice *pci_dev, uint32_t table_size) 6284 { 6285 uint8_t *config; 6286 6287 if (!msix_present(pci_dev)) { 6288 return; 6289 } 6290 6291 assert(table_size > 0 && table_size <= pci_dev->msix_entries_nr); 6292 6293 config = pci_dev->config + pci_dev->msix_cap; 6294 pci_set_word_by_mask(config + PCI_MSIX_FLAGS, PCI_MSIX_FLAGS_QSIZE, 6295 table_size - 1); 6296 } 6297 6298 static void nvme_activate_virt_res(NvmeCtrl *n) 6299 { 6300 PCIDevice *pci_dev = &n->parent_obj; 6301 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; 6302 NvmeSecCtrlEntry *sctrl; 6303 6304 /* -1 to account for the admin queue */ 6305 if (pci_is_vf(pci_dev)) { 6306 sctrl = nvme_sctrl(n); 6307 cap->vqprt = sctrl->nvq; 6308 cap->viprt = sctrl->nvi; 6309 n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; 6310 n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1; 6311 } else { 6312 cap->vqrfap = n->next_pri_ctrl_cap.vqrfap; 6313 cap->virfap = n->next_pri_ctrl_cap.virfap; 6314 n->conf_ioqpairs = le16_to_cpu(cap->vqprt) + 6315 le16_to_cpu(cap->vqrfap) - 1; 6316 n->conf_msix_qsize = le16_to_cpu(cap->viprt) + 6317 le16_to_cpu(cap->virfap); 6318 } 6319 } 6320 6321 static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst) 6322 { 6323 PCIDevice *pci_dev = &n->parent_obj; 6324 NvmeSecCtrlEntry *sctrl; 6325 NvmeNamespace *ns; 6326 int i; 6327 6328 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 6329 ns = nvme_ns(n, i); 6330 if (!ns) { 6331 continue; 6332 } 6333 6334 nvme_ns_drain(ns); 6335 } 6336 6337 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { 6338 if (n->sq[i] != NULL) { 6339 nvme_free_sq(n->sq[i], n); 6340 } 6341 } 6342 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { 6343 if (n->cq[i] != NULL) { 6344 nvme_free_cq(n->cq[i], n); 6345 } 6346 } 6347 6348 while (!QTAILQ_EMPTY(&n->aer_queue)) { 6349 NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue); 6350 QTAILQ_REMOVE(&n->aer_queue, event, entry); 6351 g_free(event); 6352 } 6353 6354 if (n->params.sriov_max_vfs) { 6355 if (!pci_is_vf(pci_dev)) { 6356 for (i = 0; i < n->sec_ctrl_list.numcntl; i++) { 6357 sctrl = &n->sec_ctrl_list.sec[i]; 6358 nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); 6359 } 6360 6361 if (rst != NVME_RESET_CONTROLLER) { 6362 pcie_sriov_pf_disable_vfs(pci_dev); 6363 } 6364 } 6365 6366 if (rst != NVME_RESET_CONTROLLER) { 6367 nvme_activate_virt_res(n); 6368 } 6369 } 6370 6371 n->aer_queued = 0; 6372 n->aer_mask = 0; 6373 n->outstanding_aers = 0; 6374 n->qs_created = false; 6375 6376 nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); 6377 6378 if (pci_is_vf(pci_dev)) { 6379 sctrl = nvme_sctrl(n); 6380 6381 stl_le_p(&n->bar.csts, sctrl->scs ? 
0 : NVME_CSTS_FAILED); 6382 } else { 6383 stl_le_p(&n->bar.csts, 0); 6384 } 6385 6386 stl_le_p(&n->bar.intms, 0); 6387 stl_le_p(&n->bar.intmc, 0); 6388 stl_le_p(&n->bar.cc, 0); 6389 6390 n->dbbuf_dbs = 0; 6391 n->dbbuf_eis = 0; 6392 n->dbbuf_enabled = false; 6393 } 6394 6395 static void nvme_ctrl_shutdown(NvmeCtrl *n) 6396 { 6397 NvmeNamespace *ns; 6398 int i; 6399 6400 if (n->pmr.dev) { 6401 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); 6402 } 6403 6404 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 6405 ns = nvme_ns(n, i); 6406 if (!ns) { 6407 continue; 6408 } 6409 6410 nvme_ns_shutdown(ns); 6411 } 6412 } 6413 6414 static void nvme_select_iocs(NvmeCtrl *n) 6415 { 6416 NvmeNamespace *ns; 6417 int i; 6418 6419 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 6420 ns = nvme_ns(n, i); 6421 if (!ns) { 6422 continue; 6423 } 6424 6425 nvme_select_iocs_ns(n, ns); 6426 } 6427 } 6428 6429 static int nvme_start_ctrl(NvmeCtrl *n) 6430 { 6431 uint64_t cap = ldq_le_p(&n->bar.cap); 6432 uint32_t cc = ldl_le_p(&n->bar.cc); 6433 uint32_t aqa = ldl_le_p(&n->bar.aqa); 6434 uint64_t asq = ldq_le_p(&n->bar.asq); 6435 uint64_t acq = ldq_le_p(&n->bar.acq); 6436 uint32_t page_bits = NVME_CC_MPS(cc) + 12; 6437 uint32_t page_size = 1 << page_bits; 6438 NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); 6439 6440 if (pci_is_vf(&n->parent_obj) && !sctrl->scs) { 6441 trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi), 6442 le16_to_cpu(sctrl->nvq), 6443 sctrl->scs ? "ONLINE" : 6444 "OFFLINE"); 6445 return -1; 6446 } 6447 if (unlikely(n->cq[0])) { 6448 trace_pci_nvme_err_startfail_cq(); 6449 return -1; 6450 } 6451 if (unlikely(n->sq[0])) { 6452 trace_pci_nvme_err_startfail_sq(); 6453 return -1; 6454 } 6455 if (unlikely(asq & (page_size - 1))) { 6456 trace_pci_nvme_err_startfail_asq_misaligned(asq); 6457 return -1; 6458 } 6459 if (unlikely(acq & (page_size - 1))) { 6460 trace_pci_nvme_err_startfail_acq_misaligned(acq); 6461 return -1; 6462 } 6463 if (unlikely(!(NVME_CAP_CSS(cap) & (1 << NVME_CC_CSS(cc))))) { 6464 trace_pci_nvme_err_startfail_css(NVME_CC_CSS(cc)); 6465 return -1; 6466 } 6467 if (unlikely(NVME_CC_MPS(cc) < NVME_CAP_MPSMIN(cap))) { 6468 trace_pci_nvme_err_startfail_page_too_small( 6469 NVME_CC_MPS(cc), 6470 NVME_CAP_MPSMIN(cap)); 6471 return -1; 6472 } 6473 if (unlikely(NVME_CC_MPS(cc) > 6474 NVME_CAP_MPSMAX(cap))) { 6475 trace_pci_nvme_err_startfail_page_too_large( 6476 NVME_CC_MPS(cc), 6477 NVME_CAP_MPSMAX(cap)); 6478 return -1; 6479 } 6480 if (unlikely(NVME_CC_IOCQES(cc) < 6481 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) { 6482 trace_pci_nvme_err_startfail_cqent_too_small( 6483 NVME_CC_IOCQES(cc), 6484 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes)); 6485 return -1; 6486 } 6487 if (unlikely(NVME_CC_IOCQES(cc) > 6488 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) { 6489 trace_pci_nvme_err_startfail_cqent_too_large( 6490 NVME_CC_IOCQES(cc), 6491 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes)); 6492 return -1; 6493 } 6494 if (unlikely(NVME_CC_IOSQES(cc) < 6495 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) { 6496 trace_pci_nvme_err_startfail_sqent_too_small( 6497 NVME_CC_IOSQES(cc), 6498 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes)); 6499 return -1; 6500 } 6501 if (unlikely(NVME_CC_IOSQES(cc) > 6502 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) { 6503 trace_pci_nvme_err_startfail_sqent_too_large( 6504 NVME_CC_IOSQES(cc), 6505 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes)); 6506 return -1; 6507 } 6508 if (unlikely(!NVME_AQA_ASQS(aqa))) { 6509 trace_pci_nvme_err_startfail_asqent_sz_zero(); 6510 return -1; 6511 } 6512 if (unlikely(!NVME_AQA_ACQS(aqa))) { 6513 trace_pci_nvme_err_startfail_acqent_sz_zero(); 6514 return
-1; 6515 } 6516 6517 n->page_bits = page_bits; 6518 n->page_size = page_size; 6519 n->max_prp_ents = n->page_size / sizeof(uint64_t); 6520 n->cqe_size = 1 << NVME_CC_IOCQES(cc); 6521 n->sqe_size = 1 << NVME_CC_IOSQES(cc); 6522 nvme_init_cq(&n->admin_cq, n, acq, 0, 0, NVME_AQA_ACQS(aqa) + 1, 1); 6523 nvme_init_sq(&n->admin_sq, n, asq, 0, 0, NVME_AQA_ASQS(aqa) + 1); 6524 6525 nvme_set_timestamp(n, 0ULL); 6526 6527 nvme_select_iocs(n); 6528 6529 return 0; 6530 } 6531 6532 static void nvme_cmb_enable_regs(NvmeCtrl *n) 6533 { 6534 uint32_t cmbloc = ldl_le_p(&n->bar.cmbloc); 6535 uint32_t cmbsz = ldl_le_p(&n->bar.cmbsz); 6536 6537 NVME_CMBLOC_SET_CDPCILS(cmbloc, 1); 6538 NVME_CMBLOC_SET_CDPMLS(cmbloc, 1); 6539 NVME_CMBLOC_SET_BIR(cmbloc, NVME_CMB_BIR); 6540 stl_le_p(&n->bar.cmbloc, cmbloc); 6541 6542 NVME_CMBSZ_SET_SQS(cmbsz, 1); 6543 NVME_CMBSZ_SET_CQS(cmbsz, 0); 6544 NVME_CMBSZ_SET_LISTS(cmbsz, 1); 6545 NVME_CMBSZ_SET_RDS(cmbsz, 1); 6546 NVME_CMBSZ_SET_WDS(cmbsz, 1); 6547 NVME_CMBSZ_SET_SZU(cmbsz, 2); /* MBs */ 6548 NVME_CMBSZ_SET_SZ(cmbsz, n->params.cmb_size_mb); 6549 stl_le_p(&n->bar.cmbsz, cmbsz); 6550 } 6551 6552 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, 6553 unsigned size) 6554 { 6555 uint64_t cap = ldq_le_p(&n->bar.cap); 6556 uint32_t cc = ldl_le_p(&n->bar.cc); 6557 uint32_t intms = ldl_le_p(&n->bar.intms); 6558 uint32_t csts = ldl_le_p(&n->bar.csts); 6559 uint32_t pmrsts = ldl_le_p(&n->bar.pmrsts); 6560 6561 if (unlikely(offset & (sizeof(uint32_t) - 1))) { 6562 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32, 6563 "MMIO write not 32-bit aligned," 6564 " offset=0x%"PRIx64"", offset); 6565 /* should be ignored, fall through for now */ 6566 } 6567 6568 if (unlikely(size < sizeof(uint32_t))) { 6569 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall, 6570 "MMIO write smaller than 32-bits," 6571 " offset=0x%"PRIx64", size=%u", 6572 offset, size); 6573 /* should be ignored, fall through for now */ 6574 } 6575 6576 switch (offset) { 6577 case NVME_REG_INTMS: 6578 if (unlikely(msix_enabled(&(n->parent_obj)))) { 6579 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, 6580 "undefined access to interrupt mask set" 6581 " when MSI-X is enabled"); 6582 /* should be ignored, fall through for now */ 6583 } 6584 intms |= data; 6585 stl_le_p(&n->bar.intms, intms); 6586 n->bar.intmc = n->bar.intms; 6587 trace_pci_nvme_mmio_intm_set(data & 0xffffffff, intms); 6588 nvme_irq_check(n); 6589 break; 6590 case NVME_REG_INTMC: 6591 if (unlikely(msix_enabled(&(n->parent_obj)))) { 6592 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, 6593 "undefined access to interrupt mask clr" 6594 " when MSI-X is enabled"); 6595 /* should be ignored, fall through for now */ 6596 } 6597 intms &= ~data; 6598 stl_le_p(&n->bar.intms, intms); 6599 n->bar.intmc = n->bar.intms; 6600 trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, intms); 6601 nvme_irq_check(n); 6602 break; 6603 case NVME_REG_CC: 6604 stl_le_p(&n->bar.cc, data); 6605 6606 trace_pci_nvme_mmio_cfg(data & 0xffffffff); 6607 6608 if (NVME_CC_SHN(data) && !(NVME_CC_SHN(cc))) { 6609 trace_pci_nvme_mmio_shutdown_set(); 6610 nvme_ctrl_shutdown(n); 6611 csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT); 6612 csts |= NVME_CSTS_SHST_COMPLETE; 6613 } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(cc)) { 6614 trace_pci_nvme_mmio_shutdown_cleared(); 6615 csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT); 6616 } 6617 6618 if (NVME_CC_EN(data) && !NVME_CC_EN(cc)) { 6619 if (unlikely(nvme_start_ctrl(n))) { 6620 trace_pci_nvme_err_startfail(); 6621 csts = NVME_CSTS_FAILED; 6622 
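/* the failed start is reported to the host via CSTS.CFS and remains set until the controller is reset (see nvme_ctrl_reset()) */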
} else { 6623 trace_pci_nvme_mmio_start_success(); 6624 csts = NVME_CSTS_READY; 6625 } 6626 } else if (!NVME_CC_EN(data) && NVME_CC_EN(cc)) { 6627 trace_pci_nvme_mmio_stopped(); 6628 nvme_ctrl_reset(n, NVME_RESET_CONTROLLER); 6629 6630 break; 6631 } 6632 6633 stl_le_p(&n->bar.csts, csts); 6634 6635 break; 6636 case NVME_REG_CSTS: 6637 if (data & (1 << 4)) { 6638 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported, 6639 "attempted to W1C CSTS.NSSRO" 6640 " but CAP.NSSRS is zero (not supported)"); 6641 } else if (data != 0) { 6642 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts, 6643 "attempted to set a read only bit" 6644 " of controller status"); 6645 } 6646 break; 6647 case NVME_REG_NSSR: 6648 if (data == 0x4e564d65) { 6649 trace_pci_nvme_ub_mmiowr_ssreset_unsupported(); 6650 } else { 6651 /* The spec says that writes of other values have no effect */ 6652 return; 6653 } 6654 break; 6655 case NVME_REG_AQA: 6656 stl_le_p(&n->bar.aqa, data); 6657 trace_pci_nvme_mmio_aqattr(data & 0xffffffff); 6658 break; 6659 case NVME_REG_ASQ: 6660 stn_le_p(&n->bar.asq, size, data); 6661 trace_pci_nvme_mmio_asqaddr(data); 6662 break; 6663 case NVME_REG_ASQ + 4: 6664 stl_le_p((uint8_t *)&n->bar.asq + 4, data); 6665 trace_pci_nvme_mmio_asqaddr_hi(data, ldq_le_p(&n->bar.asq)); 6666 break; 6667 case NVME_REG_ACQ: 6668 trace_pci_nvme_mmio_acqaddr(data); 6669 stn_le_p(&n->bar.acq, size, data); 6670 break; 6671 case NVME_REG_ACQ + 4: 6672 stl_le_p((uint8_t *)&n->bar.acq + 4, data); 6673 trace_pci_nvme_mmio_acqaddr_hi(data, ldq_le_p(&n->bar.acq)); 6674 break; 6675 case NVME_REG_CMBLOC: 6676 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved, 6677 "invalid write to reserved CMBLOC" 6678 " when CMBSZ is zero, ignored"); 6679 return; 6680 case NVME_REG_CMBSZ: 6681 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly, 6682 "invalid write to read only CMBSZ, ignored"); 6683 return; 6684 case NVME_REG_CMBMSC: 6685 if (!NVME_CAP_CMBS(cap)) { 6686 return; 6687 } 6688 6689 stn_le_p(&n->bar.cmbmsc, size, data); 6690 n->cmb.cmse = false; 6691 6692 if (NVME_CMBMSC_CRE(data)) { 6693 nvme_cmb_enable_regs(n); 6694 6695 if (NVME_CMBMSC_CMSE(data)) { 6696 uint64_t cmbmsc = ldq_le_p(&n->bar.cmbmsc); 6697 hwaddr cba = NVME_CMBMSC_CBA(cmbmsc) << CMBMSC_CBA_SHIFT; 6698 if (cba + int128_get64(n->cmb.mem.size) < cba) { 6699 uint32_t cmbsts = ldl_le_p(&n->bar.cmbsts); 6700 NVME_CMBSTS_SET_CBAI(cmbsts, 1); 6701 stl_le_p(&n->bar.cmbsts, cmbsts); 6702 return; 6703 } 6704 6705 n->cmb.cba = cba; 6706 n->cmb.cmse = true; 6707 } 6708 } else { 6709 n->bar.cmbsz = 0; 6710 n->bar.cmbloc = 0; 6711 } 6712 6713 return; 6714 case NVME_REG_CMBMSC + 4: 6715 stl_le_p((uint8_t *)&n->bar.cmbmsc + 4, data); 6716 return; 6717 6718 case NVME_REG_PMRCAP: 6719 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly, 6720 "invalid write to PMRCAP register, ignored"); 6721 return; 6722 case NVME_REG_PMRCTL: 6723 if (!NVME_CAP_PMRS(cap)) { 6724 return; 6725 } 6726 6727 stl_le_p(&n->bar.pmrctl, data); 6728 if (NVME_PMRCTL_EN(data)) { 6729 memory_region_set_enabled(&n->pmr.dev->mr, true); 6730 pmrsts = 0; 6731 } else { 6732 memory_region_set_enabled(&n->pmr.dev->mr, false); 6733 NVME_PMRSTS_SET_NRDY(pmrsts, 1); 6734 n->pmr.cmse = false; 6735 } 6736 stl_le_p(&n->bar.pmrsts, pmrsts); 6737 return; 6738 case NVME_REG_PMRSTS: 6739 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly, 6740 "invalid write to PMRSTS register, ignored"); 6741 return; 6742 case NVME_REG_PMREBS: 6743 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly, 6744 "invalid write to PMREBS register, ignored"); 6745 
return; 6746 case NVME_REG_PMRSWTP: 6747 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly, 6748 "invalid write to PMRSWTP register, ignored"); 6749 return; 6750 case NVME_REG_PMRMSCL: 6751 if (!NVME_CAP_PMRS(cap)) { 6752 return; 6753 } 6754 6755 stl_le_p(&n->bar.pmrmscl, data); 6756 n->pmr.cmse = false; 6757 6758 if (NVME_PMRMSCL_CMSE(data)) { 6759 uint64_t pmrmscu = ldl_le_p(&n->bar.pmrmscu); 6760 hwaddr cba = pmrmscu << 32 | 6761 (NVME_PMRMSCL_CBA(data) << PMRMSCL_CBA_SHIFT); 6762 if (cba + int128_get64(n->pmr.dev->mr.size) < cba) { 6763 NVME_PMRSTS_SET_CBAI(pmrsts, 1); 6764 stl_le_p(&n->bar.pmrsts, pmrsts); 6765 return; 6766 } 6767 6768 n->pmr.cmse = true; 6769 n->pmr.cba = cba; 6770 } 6771 6772 return; 6773 case NVME_REG_PMRMSCU: 6774 if (!NVME_CAP_PMRS(cap)) { 6775 return; 6776 } 6777 6778 stl_le_p(&n->bar.pmrmscu, data); 6779 return; 6780 default: 6781 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid, 6782 "invalid MMIO write," 6783 " offset=0x%"PRIx64", data=%"PRIx64"", 6784 offset, data); 6785 break; 6786 } 6787 } 6788 6789 static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size) 6790 { 6791 NvmeCtrl *n = (NvmeCtrl *)opaque; 6792 uint8_t *ptr = (uint8_t *)&n->bar; 6793 6794 trace_pci_nvme_mmio_read(addr, size); 6795 6796 if (unlikely(addr & (sizeof(uint32_t) - 1))) { 6797 NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32, 6798 "MMIO read not 32-bit aligned," 6799 " offset=0x%"PRIx64"", addr); 6800 /* should RAZ, fall through for now */ 6801 } else if (unlikely(size < sizeof(uint32_t))) { 6802 NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall, 6803 "MMIO read smaller than 32-bits," 6804 " offset=0x%"PRIx64"", addr); 6805 /* should RAZ, fall through for now */ 6806 } 6807 6808 if (addr > sizeof(n->bar) - size) { 6809 NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs, 6810 "MMIO read beyond last register," 6811 " offset=0x%"PRIx64", returning 0", addr); 6812 6813 return 0; 6814 } 6815 6816 if (pci_is_vf(&n->parent_obj) && !nvme_sctrl(n)->scs && 6817 addr != NVME_REG_CSTS) { 6818 trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size); 6819 return 0; 6820 } 6821 6822 /* 6823 * When PMRWBM bit 1 is set then a read 6824 * from PMRSTS should ensure prior writes 6825 * made it to persistent media 6826 */ 6827 if (addr == NVME_REG_PMRSTS && 6828 (NVME_PMRCAP_PMRWBM(ldl_le_p(&n->bar.pmrcap)) & 0x02)) { 6829 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); 6830 } 6831 6832 return ldn_le_p(ptr + addr, size); 6833 } 6834 6835 static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) 6836 { 6837 uint32_t qid; 6838 6839 if (unlikely(addr & ((1 << 2) - 1))) { 6840 NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned, 6841 "doorbell write not 32-bit aligned," 6842 " offset=0x%"PRIx64", ignoring", addr); 6843 return; 6844 } 6845 6846 if (((addr - 0x1000) >> 2) & 1) { 6847 /* Completion queue doorbell write */ 6848 6849 uint16_t new_head = val & 0xffff; 6850 int start_sqs; 6851 NvmeCQueue *cq; 6852 6853 qid = (addr - (0x1000 + (1 << 2))) >> 3; 6854 if (unlikely(nvme_check_cqid(n, qid))) { 6855 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq, 6856 "completion queue doorbell write" 6857 " for nonexistent queue," 6858 " sqid=%"PRIu32", ignoring", qid); 6859 6860 /* 6861 * NVM Express v1.3d, Section 4.1 states: "If host software writes 6862 * an invalid value to the Submission Queue Tail Doorbell or 6863 * Completion Queue Head Doorbell register and an Asynchronous Event 6864 * Request command is outstanding, then an asynchronous event is 6865 * posted to the Admin Completion Queue with a status code of
6866 * Invalid Doorbell Write Value." 6867 * 6868 * Also note that the spec includes the "Invalid Doorbell Register" 6869 * status code, but nowhere does it specify when to use it. 6870 * However, it seems reasonable to use it here in a similar 6871 * fashion. 6872 */ 6873 if (n->outstanding_aers) { 6874 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, 6875 NVME_AER_INFO_ERR_INVALID_DB_REGISTER, 6876 NVME_LOG_ERROR_INFO); 6877 } 6878 6879 return; 6880 } 6881 6882 cq = n->cq[qid]; 6883 if (unlikely(new_head >= cq->size)) { 6884 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead, 6885 "completion queue doorbell write value" 6886 " beyond queue size, sqid=%"PRIu32"," 6887 " new_head=%"PRIu16", ignoring", 6888 qid, new_head); 6889 6890 if (n->outstanding_aers) { 6891 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, 6892 NVME_AER_INFO_ERR_INVALID_DB_VALUE, 6893 NVME_LOG_ERROR_INFO); 6894 } 6895 6896 return; 6897 } 6898 6899 trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head); 6900 6901 start_sqs = nvme_cq_full(cq) ? 1 : 0; 6902 cq->head = new_head; 6903 if (!qid && n->dbbuf_enabled) { 6904 pci_dma_write(&n->parent_obj, cq->db_addr, &cq->head, 6905 sizeof(cq->head)); 6906 } 6907 if (start_sqs) { 6908 NvmeSQueue *sq; 6909 QTAILQ_FOREACH(sq, &cq->sq_list, entry) { 6910 timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); 6911 } 6912 timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); 6913 } 6914 6915 if (cq->tail == cq->head) { 6916 if (cq->irq_enabled) { 6917 n->cq_pending--; 6918 } 6919 6920 nvme_irq_deassert(n, cq); 6921 } 6922 } else { 6923 /* Submission queue doorbell write */ 6924 6925 uint16_t new_tail = val & 0xffff; 6926 NvmeSQueue *sq; 6927 6928 qid = (addr - 0x1000) >> 3; 6929 if (unlikely(nvme_check_sqid(n, qid))) { 6930 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq, 6931 "submission queue doorbell write" 6932 " for nonexistent queue," 6933 " sqid=%"PRIu32", ignoring", qid); 6934 6935 if (n->outstanding_aers) { 6936 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, 6937 NVME_AER_INFO_ERR_INVALID_DB_REGISTER, 6938 NVME_LOG_ERROR_INFO); 6939 } 6940 6941 return; 6942 } 6943 6944 sq = n->sq[qid]; 6945 if (unlikely(new_tail >= sq->size)) { 6946 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail, 6947 "submission queue doorbell write value" 6948 " beyond queue size, sqid=%"PRIu32"," 6949 " new_tail=%"PRIu16", ignoring", 6950 qid, new_tail); 6951 6952 if (n->outstanding_aers) { 6953 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, 6954 NVME_AER_INFO_ERR_INVALID_DB_VALUE, 6955 NVME_LOG_ERROR_INFO); 6956 } 6957 6958 return; 6959 } 6960 6961 trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail); 6962 6963 sq->tail = new_tail; 6964 if (!qid && n->dbbuf_enabled) { 6965 /* 6966 * The spec states "the host shall also update the controller's 6967 * corresponding doorbell property to match the value of that entry 6968 * in the Shadow Doorbell buffer." 6969 * 6970 * Since this context is currently a VM trap, we can safely enforce 6971 * the requirement from the device side in case the host is 6972 * misbehaving. 6973 * 6974 * Note, we shouldn't have to do this, but various drivers 6975 * including ones that run on Linux, are not updating Admin Queues, 6976 * so we can't trust reading it for an appropriate sq tail. 
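* (Concretely: nvme_dbbuf_config() programmed sq->db_addr as dbs_addr + (sqid << 3), so the write below simply forces that shadow slot to agree with the doorbell value the host just wrote; in this branch sqid is 0, i.e. the admin submission queue entry.)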
6977 */ 6978 pci_dma_write(&n->parent_obj, sq->db_addr, &sq->tail, 6979 sizeof(sq->tail)); 6980 } 6981 timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); 6982 } 6983 } 6984 6985 static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data, 6986 unsigned size) 6987 { 6988 NvmeCtrl *n = (NvmeCtrl *)opaque; 6989 6990 trace_pci_nvme_mmio_write(addr, data, size); 6991 6992 if (pci_is_vf(&n->parent_obj) && !nvme_sctrl(n)->scs && 6993 addr != NVME_REG_CSTS) { 6994 trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size); 6995 return; 6996 } 6997 6998 if (addr < sizeof(n->bar)) { 6999 nvme_write_bar(n, addr, data, size); 7000 } else { 7001 nvme_process_db(n, addr, data); 7002 } 7003 } 7004 7005 static const MemoryRegionOps nvme_mmio_ops = { 7006 .read = nvme_mmio_read, 7007 .write = nvme_mmio_write, 7008 .endianness = DEVICE_LITTLE_ENDIAN, 7009 .impl = { 7010 .min_access_size = 2, 7011 .max_access_size = 8, 7012 }, 7013 }; 7014 7015 static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data, 7016 unsigned size) 7017 { 7018 NvmeCtrl *n = (NvmeCtrl *)opaque; 7019 stn_le_p(&n->cmb.buf[addr], size, data); 7020 } 7021 7022 static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size) 7023 { 7024 NvmeCtrl *n = (NvmeCtrl *)opaque; 7025 return ldn_le_p(&n->cmb.buf[addr], size); 7026 } 7027 7028 static const MemoryRegionOps nvme_cmb_ops = { 7029 .read = nvme_cmb_read, 7030 .write = nvme_cmb_write, 7031 .endianness = DEVICE_LITTLE_ENDIAN, 7032 .impl = { 7033 .min_access_size = 1, 7034 .max_access_size = 8, 7035 }, 7036 }; 7037 7038 static void nvme_check_constraints(NvmeCtrl *n, Error **errp) 7039 { 7040 NvmeParams *params = &n->params; 7041 7042 if (params->num_queues) { 7043 warn_report("num_queues is deprecated; please use max_ioqpairs " 7044 "instead"); 7045 7046 params->max_ioqpairs = params->num_queues - 1; 7047 } 7048 7049 if (n->namespace.blkconf.blk && n->subsys) { 7050 error_setg(errp, "subsystem support is unavailable with legacy " 7051 "namespace ('drive' property)"); 7052 return; 7053 } 7054 7055 if (params->max_ioqpairs < 1 || 7056 params->max_ioqpairs > NVME_MAX_IOQPAIRS) { 7057 error_setg(errp, "max_ioqpairs must be between 1 and %d", 7058 NVME_MAX_IOQPAIRS); 7059 return; 7060 } 7061 7062 if (params->msix_qsize < 1 || 7063 params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) { 7064 error_setg(errp, "msix_qsize must be between 1 and %d", 7065 PCI_MSIX_FLAGS_QSIZE + 1); 7066 return; 7067 } 7068 7069 if (!params->serial) { 7070 error_setg(errp, "serial property not set"); 7071 return; 7072 } 7073 7074 if (n->pmr.dev) { 7075 if (host_memory_backend_is_mapped(n->pmr.dev)) { 7076 error_setg(errp, "can't use already busy memdev: %s", 7077 object_get_canonical_path_component(OBJECT(n->pmr.dev))); 7078 return; 7079 } 7080 7081 if (!is_power_of_2(n->pmr.dev->size)) { 7082 error_setg(errp, "pmr backend size needs to be power of 2 in size"); 7083 return; 7084 } 7085 7086 host_memory_backend_set_mapped(n->pmr.dev, true); 7087 } 7088 7089 if (n->params.zasl > n->params.mdts) { 7090 error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less " 7091 "than or equal to mdts (Maximum Data Transfer Size)"); 7092 return; 7093 } 7094 7095 if (!n->params.vsl) { 7096 error_setg(errp, "vsl must be non-zero"); 7097 return; 7098 } 7099 7100 if (params->sriov_max_vfs) { 7101 if (!n->subsys) { 7102 error_setg(errp, "subsystem is required for the use of SR-IOV"); 7103 return; 7104 } 7105 7106 if (params->sriov_max_vfs > NVME_MAX_VFS) { 7107 error_setg(errp, "sriov_max_vfs must be 
between 0 and %d", 7108 NVME_MAX_VFS); 7109 return; 7110 } 7111 7112 if (params->cmb_size_mb) { 7113 error_setg(errp, "CMB is not supported with SR-IOV"); 7114 return; 7115 } 7116 7117 if (n->pmr.dev) { 7118 error_setg(errp, "PMR is not supported with SR-IOV"); 7119 return; 7120 } 7121 7122 if (!params->sriov_vq_flexible || !params->sriov_vi_flexible) { 7123 error_setg(errp, "both sriov_vq_flexible and sriov_vi_flexible" 7124 " must be set for the use of SR-IOV"); 7125 return; 7126 } 7127 7128 if (params->sriov_vq_flexible < params->sriov_max_vfs * 2) { 7129 error_setg(errp, "sriov_vq_flexible must be greater than or equal" 7130 " to %d (sriov_max_vfs * 2)", params->sriov_max_vfs * 2); 7131 return; 7132 } 7133 7134 if (params->max_ioqpairs < params->sriov_vq_flexible + 2) { 7135 error_setg(errp, "(max_ioqpairs - sriov_vq_flexible) must be" 7136 " greater than or equal to 2"); 7137 return; 7138 } 7139 7140 if (params->sriov_vi_flexible < params->sriov_max_vfs) { 7141 error_setg(errp, "sriov_vi_flexible must be greater than or equal" 7142 " to %d (sriov_max_vfs)", params->sriov_max_vfs); 7143 return; 7144 } 7145 7146 if (params->msix_qsize < params->sriov_vi_flexible + 1) { 7147 error_setg(errp, "(msix_qsize - sriov_vi_flexible) must be" 7148 " greater than or equal to 1"); 7149 return; 7150 } 7151 7152 if (params->sriov_max_vi_per_vf && 7153 (params->sriov_max_vi_per_vf - 1) % NVME_VF_RES_GRANULARITY) { 7154 error_setg(errp, "sriov_max_vi_per_vf must meet:" 7155 " (sriov_max_vi_per_vf - 1) %% %d == 0 and" 7156 " sriov_max_vi_per_vf >= 1", NVME_VF_RES_GRANULARITY); 7157 return; 7158 } 7159 7160 if (params->sriov_max_vq_per_vf && 7161 (params->sriov_max_vq_per_vf < 2 || 7162 (params->sriov_max_vq_per_vf - 1) % NVME_VF_RES_GRANULARITY)) { 7163 error_setg(errp, "sriov_max_vq_per_vf must meet:" 7164 " (sriov_max_vq_per_vf - 1) %% %d == 0 and" 7165 " sriov_max_vq_per_vf >= 2", NVME_VF_RES_GRANULARITY); 7166 return; 7167 } 7168 } 7169 } 7170 7171 static void nvme_init_state(NvmeCtrl *n) 7172 { 7173 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; 7174 NvmeSecCtrlList *list = &n->sec_ctrl_list; 7175 NvmeSecCtrlEntry *sctrl; 7176 uint8_t max_vfs; 7177 int i; 7178 7179 if (pci_is_vf(&n->parent_obj)) { 7180 sctrl = nvme_sctrl(n); 7181 max_vfs = 0; 7182 n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; 7183 n->conf_msix_qsize = sctrl->nvi ? 
le16_to_cpu(sctrl->nvi) : 1; 7184 } else { 7185 max_vfs = n->params.sriov_max_vfs; 7186 n->conf_ioqpairs = n->params.max_ioqpairs; 7187 n->conf_msix_qsize = n->params.msix_qsize; 7188 } 7189 7190 n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1); 7191 n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1); 7192 n->temperature = NVME_TEMPERATURE; 7193 n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING; 7194 n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 7195 n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1); 7196 QTAILQ_INIT(&n->aer_queue); 7197 7198 list->numcntl = cpu_to_le16(max_vfs); 7199 for (i = 0; i < max_vfs; i++) { 7200 sctrl = &list->sec[i]; 7201 sctrl->pcid = cpu_to_le16(n->cntlid); 7202 sctrl->vfn = cpu_to_le16(i + 1); 7203 } 7204 7205 cap->cntlid = cpu_to_le16(n->cntlid); 7206 cap->crt = NVME_CRT_VQ | NVME_CRT_VI; 7207 7208 if (pci_is_vf(&n->parent_obj)) { 7209 cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs); 7210 } else { 7211 cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs - 7212 n->params.sriov_vq_flexible); 7213 cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible); 7214 cap->vqrfap = cap->vqfrt; 7215 cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY); 7216 cap->vqfrsm = n->params.sriov_max_vq_per_vf ? 7217 cpu_to_le16(n->params.sriov_max_vq_per_vf) : 7218 cap->vqfrt / MAX(max_vfs, 1); 7219 } 7220 7221 if (pci_is_vf(&n->parent_obj)) { 7222 cap->viprt = cpu_to_le16(n->conf_msix_qsize); 7223 } else { 7224 cap->viprt = cpu_to_le16(n->params.msix_qsize - 7225 n->params.sriov_vi_flexible); 7226 cap->vifrt = cpu_to_le32(n->params.sriov_vi_flexible); 7227 cap->virfap = cap->vifrt; 7228 cap->vigran = cpu_to_le16(NVME_VF_RES_GRANULARITY); 7229 cap->vifrsm = n->params.sriov_max_vi_per_vf ? 7230 cpu_to_le16(n->params.sriov_max_vi_per_vf) : 7231 cap->vifrt / MAX(max_vfs, 1); 7232 } 7233 } 7234 7235 static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev) 7236 { 7237 uint64_t cmb_size = n->params.cmb_size_mb * MiB; 7238 uint64_t cap = ldq_le_p(&n->bar.cap); 7239 7240 n->cmb.buf = g_malloc0(cmb_size); 7241 memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n, 7242 "nvme-cmb", cmb_size); 7243 pci_register_bar(pci_dev, NVME_CMB_BIR, 7244 PCI_BASE_ADDRESS_SPACE_MEMORY | 7245 PCI_BASE_ADDRESS_MEM_TYPE_64 | 7246 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem); 7247 7248 NVME_CAP_SET_CMBS(cap, 1); 7249 stq_le_p(&n->bar.cap, cap); 7250 7251 if (n->params.legacy_cmb) { 7252 nvme_cmb_enable_regs(n); 7253 n->cmb.cmse = true; 7254 } 7255 } 7256 7257 static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev) 7258 { 7259 uint32_t pmrcap = ldl_le_p(&n->bar.pmrcap); 7260 7261 NVME_PMRCAP_SET_RDS(pmrcap, 1); 7262 NVME_PMRCAP_SET_WDS(pmrcap, 1); 7263 NVME_PMRCAP_SET_BIR(pmrcap, NVME_PMR_BIR); 7264 /* Turn on bit 1 support */ 7265 NVME_PMRCAP_SET_PMRWBM(pmrcap, 0x02); 7266 NVME_PMRCAP_SET_CMSS(pmrcap, 1); 7267 stl_le_p(&n->bar.pmrcap, pmrcap); 7268 7269 pci_register_bar(pci_dev, NVME_PMR_BIR, 7270 PCI_BASE_ADDRESS_SPACE_MEMORY | 7271 PCI_BASE_ADDRESS_MEM_TYPE_64 | 7272 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr); 7273 7274 memory_region_set_enabled(&n->pmr.dev->mr, false); 7275 } 7276 7277 static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs, 7278 unsigned *msix_table_offset, 7279 unsigned *msix_pba_offset) 7280 { 7281 uint64_t bar_size, msix_table_size, msix_pba_size; 7282 7283 bar_size = sizeof(NvmeBar) + 2 * total_queues * NVME_DB_SIZE; 7284 bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); 7285 7286 if (msix_table_offset) { 7287 
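/* the MSI-X table is placed immediately after the 4 KiB aligned register and doorbell area computed above */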
*msix_table_offset = bar_size; 7288 } 7289 7290 msix_table_size = PCI_MSIX_ENTRY_SIZE * total_irqs; 7291 bar_size += msix_table_size; 7292 bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); 7293 7294 if (msix_pba_offset) { 7295 *msix_pba_offset = bar_size; 7296 } 7297 7298 msix_pba_size = QEMU_ALIGN_UP(total_irqs, 64) / 8; 7299 bar_size += msix_pba_size; 7300 7301 bar_size = pow2ceil(bar_size); 7302 return bar_size; 7303 } 7304 7305 static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset) 7306 { 7307 uint16_t vf_dev_id = n->params.use_intel_id ? 7308 PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME; 7309 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; 7310 uint64_t bar_size = nvme_bar_size(le16_to_cpu(cap->vqfrsm), 7311 le16_to_cpu(cap->vifrsm), 7312 NULL, NULL); 7313 7314 pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id, 7315 n->params.sriov_max_vfs, n->params.sriov_max_vfs, 7316 NVME_VF_OFFSET, NVME_VF_STRIDE); 7317 7318 pcie_sriov_pf_init_vf_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | 7319 PCI_BASE_ADDRESS_MEM_TYPE_64, bar_size); 7320 } 7321 7322 static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset) 7323 { 7324 Error *err = NULL; 7325 int ret; 7326 7327 ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, offset, 7328 PCI_PM_SIZEOF, &err); 7329 if (err) { 7330 error_report_err(err); 7331 return ret; 7332 } 7333 7334 pci_set_word(pci_dev->config + offset + PCI_PM_PMC, 7335 PCI_PM_CAP_VER_1_2); 7336 pci_set_word(pci_dev->config + offset + PCI_PM_CTRL, 7337 PCI_PM_CTRL_NO_SOFT_RESET); 7338 pci_set_word(pci_dev->wmask + offset + PCI_PM_CTRL, 7339 PCI_PM_CTRL_STATE_MASK); 7340 7341 return 0; 7342 } 7343 7344 static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) 7345 { 7346 uint8_t *pci_conf = pci_dev->config; 7347 uint64_t bar_size; 7348 unsigned msix_table_offset, msix_pba_offset; 7349 int ret; 7350 7351 Error *err = NULL; 7352 7353 pci_conf[PCI_INTERRUPT_PIN] = 1; 7354 pci_config_set_prog_interface(pci_conf, 0x2); 7355 7356 if (n->params.use_intel_id) { 7357 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); 7358 pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_NVME); 7359 } else { 7360 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT); 7361 pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME); 7362 } 7363 7364 pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS); 7365 nvme_add_pm_capability(pci_dev, 0x60); 7366 pcie_endpoint_cap_init(pci_dev, 0x80); 7367 pcie_cap_flr_init(pci_dev); 7368 if (n->params.sriov_max_vfs) { 7369 pcie_ari_init(pci_dev, 0x100, 1); 7370 } 7371 7372 /* add one to max_ioqpairs to account for the admin queue pair */ 7373 bar_size = nvme_bar_size(n->params.max_ioqpairs + 1, n->params.msix_qsize, 7374 &msix_table_offset, &msix_pba_offset); 7375 7376 memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); 7377 memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", 7378 msix_table_offset); 7379 memory_region_add_subregion(&n->bar0, 0, &n->iomem); 7380 7381 if (pci_is_vf(pci_dev)) { 7382 pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); 7383 } else { 7384 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | 7385 PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); 7386 } 7387 ret = msix_init(pci_dev, n->params.msix_qsize, 7388 &n->bar0, 0, msix_table_offset, 7389 &n->bar0, 0, msix_pba_offset, 0, &err); 7390 if (ret < 0) { 7391 if (ret == -ENOTSUP) { 7392 warn_report_err(err); 7393 } else { 7394 error_propagate(errp, err); 7395 return ret; 7396 } 7397 } 7398 7399 
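/*
 * Rough illustration of the resulting BAR0 layout with the default
 * max_ioqpairs=64 and msix_qsize=65 (assuming sizeof(NvmeBar) is 4 KiB,
 * NVME_DB_SIZE is 4 and PCI_MSIX_ENTRY_SIZE is 16):
 *
 *   0x0000  controller registers
 *   0x1000  doorbells (2 * 65 * 4 = 520 bytes)
 *   0x2000  MSI-X table (65 * 16 = 1040 bytes)
 *   0x3000  MSI-X PBA (16 bytes)
 *
 * nvme_bar_size() then rounds the total up to a power of two, giving a
 * 16 KiB BAR.
 */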
nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); 7400 7401 if (n->params.cmb_size_mb) { 7402 nvme_init_cmb(n, pci_dev); 7403 } 7404 7405 if (n->pmr.dev) { 7406 nvme_init_pmr(n, pci_dev); 7407 } 7408 7409 if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) { 7410 nvme_init_sriov(n, pci_dev, 0x120); 7411 } 7412 7413 return 0; 7414 } 7415 7416 static void nvme_init_subnqn(NvmeCtrl *n) 7417 { 7418 NvmeSubsystem *subsys = n->subsys; 7419 NvmeIdCtrl *id = &n->id_ctrl; 7420 7421 if (!subsys) { 7422 snprintf((char *)id->subnqn, sizeof(id->subnqn), 7423 "nqn.2019-08.org.qemu:%s", n->params.serial); 7424 } else { 7425 pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn); 7426 } 7427 } 7428 7429 static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) 7430 { 7431 NvmeIdCtrl *id = &n->id_ctrl; 7432 uint8_t *pci_conf = pci_dev->config; 7433 uint64_t cap = ldq_le_p(&n->bar.cap); 7434 NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); 7435 7436 id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); 7437 id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); 7438 strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' '); 7439 strpadcpy((char *)id->fr, sizeof(id->fr), QEMU_VERSION, ' '); 7440 strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' '); 7441 7442 id->cntlid = cpu_to_le16(n->cntlid); 7443 7444 id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); 7445 id->ctratt |= cpu_to_le32(NVME_CTRATT_ELBAS); 7446 7447 id->rab = 6; 7448 7449 if (n->params.use_intel_id) { 7450 id->ieee[0] = 0xb3; 7451 id->ieee[1] = 0x02; 7452 id->ieee[2] = 0x00; 7453 } else { 7454 id->ieee[0] = 0x00; 7455 id->ieee[1] = 0x54; 7456 id->ieee[2] = 0x52; 7457 } 7458 7459 id->mdts = n->params.mdts; 7460 id->ver = cpu_to_le32(NVME_SPEC_VER); 7461 id->oacs = 7462 cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF); 7463 id->cntrltype = 0x1; 7464 7465 /* 7466 * Because the controller always completes the Abort command immediately, 7467 * there can never be more than one concurrently executing Abort command, 7468 * so this value is never used for anything. Note that there can easily be 7469 * many Abort commands in the queues, but they are not considered 7470 * "executing" until processed by nvme_abort. 7471 * 7472 * The specification recommends a value of 3 for Abort Command Limit (four 7473 * concurrently outstanding Abort commands), so let's use that, though it is 7474 * inconsequential. 7475 */ 7476 id->acl = 3; 7477 id->aerl = n->params.aerl; 7478 id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO; 7479 id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED; 7480 7481 /* recommended default value (~70 C) */ 7482 id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING); 7483 id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL); 7484 7485 id->sqes = (0x6 << 4) | 0x6; 7486 id->cqes = (0x4 << 4) | 0x4; 7487 id->nn = cpu_to_le32(NVME_MAX_NAMESPACES); 7488 id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP | 7489 NVME_ONCS_FEATURES | NVME_ONCS_DSM | 7490 NVME_ONCS_COMPARE | NVME_ONCS_COPY); 7491 7492 /* 7493 * NOTE: If this device ever supports a command set that does NOT use 0x0 7494 * as a Flush-equivalent operation, support for the broadcast NSID in Flush 7495 * should probably be removed. 7496 * 7497 * See comment in nvme_io_cmd.
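* (In practice this means a Flush submitted with NSID FFFFFFFFh is accepted and applied across all attached namespaces.)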
7498 */ 7499 id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT; 7500 7501 id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1); 7502 id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN); 7503 7504 nvme_init_subnqn(n); 7505 7506 id->psd[0].mp = cpu_to_le16(0x9c4); 7507 id->psd[0].enlat = cpu_to_le32(0x10); 7508 id->psd[0].exlat = cpu_to_le32(0x4); 7509 7510 if (n->subsys) { 7511 id->cmic |= NVME_CMIC_MULTI_CTRL; 7512 } 7513 7514 NVME_CAP_SET_MQES(cap, 0x7ff); 7515 NVME_CAP_SET_CQR(cap, 1); 7516 NVME_CAP_SET_TO(cap, 0xf); 7517 NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM); 7518 NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_CSI_SUPP); 7519 NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_ADMIN_ONLY); 7520 NVME_CAP_SET_MPSMAX(cap, 4); 7521 NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0); 7522 NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0); 7523 stq_le_p(&n->bar.cap, cap); 7524 7525 stl_le_p(&n->bar.vs, NVME_SPEC_VER); 7526 n->bar.intmc = n->bar.intms = 0; 7527 7528 if (pci_is_vf(&n->parent_obj) && !sctrl->scs) { 7529 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); 7530 } 7531 } 7532 7533 static int nvme_init_subsys(NvmeCtrl *n, Error **errp) 7534 { 7535 int cntlid; 7536 7537 if (!n->subsys) { 7538 return 0; 7539 } 7540 7541 cntlid = nvme_subsys_register_ctrl(n, errp); 7542 if (cntlid < 0) { 7543 return -1; 7544 } 7545 7546 n->cntlid = cntlid; 7547 7548 return 0; 7549 } 7550 7551 void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns) 7552 { 7553 uint32_t nsid = ns->params.nsid; 7554 assert(nsid && nsid <= NVME_MAX_NAMESPACES); 7555 7556 n->namespaces[nsid] = ns; 7557 ns->attached++; 7558 7559 n->dmrsl = MIN_NON_ZERO(n->dmrsl, 7560 BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); 7561 } 7562 7563 static void nvme_realize(PCIDevice *pci_dev, Error **errp) 7564 { 7565 NvmeCtrl *n = NVME(pci_dev); 7566 NvmeNamespace *ns; 7567 Error *local_err = NULL; 7568 NvmeCtrl *pn = NVME(pcie_sriov_get_pf(pci_dev)); 7569 7570 if (pci_is_vf(pci_dev)) { 7571 /* 7572 * VFs derive settings from the parent. PF's lifespan exceeds 7573 * that of VF's, so it's safe to share params.serial. 
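* The subsystem link is inherited the same way just below, so the VF's secondary controller ends up registered under, and reporting, the same SUBNQN as its parent PF.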
7574 */ 7575 memcpy(&n->params, &pn->params, sizeof(NvmeParams)); 7576 n->subsys = pn->subsys; 7577 } 7578 7579 nvme_check_constraints(n, &local_err); 7580 if (local_err) { 7581 error_propagate(errp, local_err); 7582 return; 7583 } 7584 7585 qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, 7586 &pci_dev->qdev, n->parent_obj.qdev.id); 7587 7588 if (nvme_init_subsys(n, errp)) { 7589 error_propagate(errp, local_err); 7590 return; 7591 } 7592 nvme_init_state(n); 7593 if (nvme_init_pci(n, pci_dev, errp)) { 7594 return; 7595 } 7596 nvme_init_ctrl(n, pci_dev); 7597 7598 /* setup a namespace if the controller drive property was given */ 7599 if (n->namespace.blkconf.blk) { 7600 ns = &n->namespace; 7601 ns->params.nsid = 1; 7602 7603 if (nvme_ns_setup(ns, errp)) { 7604 return; 7605 } 7606 7607 nvme_attach_ns(n, ns); 7608 } 7609 } 7610 7611 static void nvme_exit(PCIDevice *pci_dev) 7612 { 7613 NvmeCtrl *n = NVME(pci_dev); 7614 NvmeNamespace *ns; 7615 int i; 7616 7617 nvme_ctrl_reset(n, NVME_RESET_FUNCTION); 7618 7619 if (n->subsys) { 7620 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 7621 ns = nvme_ns(n, i); 7622 if (ns) { 7623 ns->attached--; 7624 } 7625 } 7626 7627 nvme_subsys_unregister_ctrl(n->subsys, n); 7628 } 7629 7630 g_free(n->cq); 7631 g_free(n->sq); 7632 g_free(n->aer_reqs); 7633 7634 if (n->params.cmb_size_mb) { 7635 g_free(n->cmb.buf); 7636 } 7637 7638 if (n->pmr.dev) { 7639 host_memory_backend_set_mapped(n->pmr.dev, false); 7640 } 7641 7642 if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) { 7643 pcie_sriov_pf_exit(pci_dev); 7644 } 7645 7646 msix_uninit(pci_dev, &n->bar0, &n->bar0); 7647 memory_region_del_subregion(&n->bar0, &n->iomem); 7648 } 7649 7650 static Property nvme_props[] = { 7651 DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf), 7652 DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND, 7653 HostMemoryBackend *), 7654 DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS, 7655 NvmeSubsystem *), 7656 DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial), 7657 DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0), 7658 DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0), 7659 DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64), 7660 DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65), 7661 DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3), 7662 DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64), 7663 DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7), 7664 DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7), 7665 DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false), 7666 DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false), 7667 DEFINE_PROP_BOOL("ioeventfd", NvmeCtrl, params.ioeventfd, true), 7668 DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0), 7669 DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl, 7670 params.auto_transition_zones, true), 7671 DEFINE_PROP_UINT8("sriov_max_vfs", NvmeCtrl, params.sriov_max_vfs, 0), 7672 DEFINE_PROP_UINT16("sriov_vq_flexible", NvmeCtrl, 7673 params.sriov_vq_flexible, 0), 7674 DEFINE_PROP_UINT16("sriov_vi_flexible", NvmeCtrl, 7675 params.sriov_vi_flexible, 0), 7676 DEFINE_PROP_UINT8("sriov_max_vi_per_vf", NvmeCtrl, 7677 params.sriov_max_vi_per_vf, 0), 7678 DEFINE_PROP_UINT8("sriov_max_vq_per_vf", NvmeCtrl, 7679 params.sriov_max_vq_per_vf, 0), 7680 DEFINE_PROP_END_OF_LIST(), 7681 }; 7682 7683 static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name, 7684 void *opaque, Error 
**errp) 7685 { 7686 NvmeCtrl *n = NVME(obj); 7687 uint8_t value = n->smart_critical_warning; 7688 7689 visit_type_uint8(v, name, &value, errp); 7690 } 7691 7692 static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name, 7693 void *opaque, Error **errp) 7694 { 7695 NvmeCtrl *n = NVME(obj); 7696 uint8_t value, old_value, cap = 0, index, event; 7697 7698 if (!visit_type_uint8(v, name, &value, errp)) { 7699 return; 7700 } 7701 7702 cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY 7703 | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA; 7704 if (NVME_CAP_PMRS(ldq_le_p(&n->bar.cap))) { 7705 cap |= NVME_SMART_PMR_UNRELIABLE; 7706 } 7707 7708 if ((value & cap) != value) { 7709 error_setg(errp, "unsupported smart critical warning bits: 0x%x", 7710 value & ~cap); 7711 return; 7712 } 7713 7714 old_value = n->smart_critical_warning; 7715 n->smart_critical_warning = value; 7716 7717 /* only inject new bits of smart critical warning */ 7718 for (index = 0; index < NVME_SMART_WARN_MAX; index++) { 7719 event = 1 << index; 7720 if (value & ~old_value & event) 7721 nvme_smart_event(n, event); 7722 } 7723 } 7724 7725 static void nvme_pci_reset(DeviceState *qdev) 7726 { 7727 PCIDevice *pci_dev = PCI_DEVICE(qdev); 7728 NvmeCtrl *n = NVME(pci_dev); 7729 7730 trace_pci_nvme_pci_reset(); 7731 nvme_ctrl_reset(n, NVME_RESET_FUNCTION); 7732 } 7733 7734 static void nvme_sriov_pre_write_ctrl(PCIDevice *dev, uint32_t address, 7735 uint32_t val, int len) 7736 { 7737 NvmeCtrl *n = NVME(dev); 7738 NvmeSecCtrlEntry *sctrl; 7739 uint16_t sriov_cap = dev->exp.sriov_cap; 7740 uint32_t off = address - sriov_cap; 7741 int i, num_vfs; 7742 7743 if (!sriov_cap) { 7744 return; 7745 } 7746 7747 if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) { 7748 if (!(val & PCI_SRIOV_CTRL_VFE)) { 7749 num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF); 7750 for (i = 0; i < num_vfs; i++) { 7751 sctrl = &n->sec_ctrl_list.sec[i]; 7752 nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); 7753 } 7754 } 7755 } 7756 } 7757 7758 static void nvme_pci_write_config(PCIDevice *dev, uint32_t address, 7759 uint32_t val, int len) 7760 { 7761 nvme_sriov_pre_write_ctrl(dev, address, val, len); 7762 pci_default_write_config(dev, address, val, len); 7763 pcie_cap_flr_write_config(dev, address, val, len); 7764 } 7765 7766 static const VMStateDescription nvme_vmstate = { 7767 .name = "nvme", 7768 .unmigratable = 1, 7769 }; 7770 7771 static void nvme_class_init(ObjectClass *oc, void *data) 7772 { 7773 DeviceClass *dc = DEVICE_CLASS(oc); 7774 PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); 7775 7776 pc->realize = nvme_realize; 7777 pc->config_write = nvme_pci_write_config; 7778 pc->exit = nvme_exit; 7779 pc->class_id = PCI_CLASS_STORAGE_EXPRESS; 7780 pc->revision = 2; 7781 7782 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 7783 dc->desc = "Non-Volatile Memory Express"; 7784 device_class_set_props(dc, nvme_props); 7785 dc->vmsd = &nvme_vmstate; 7786 dc->reset = nvme_pci_reset; 7787 } 7788 7789 static void nvme_instance_init(Object *obj) 7790 { 7791 NvmeCtrl *n = NVME(obj); 7792 7793 device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex, 7794 "bootindex", "/namespace@1,0", 7795 DEVICE(obj)); 7796 7797 object_property_add(obj, "smart_critical_warning", "uint8", 7798 nvme_get_smart_warning, 7799 nvme_set_smart_warning, NULL, NULL); 7800 } 7801 7802 static const TypeInfo nvme_info = { 7803 .name = TYPE_NVME, 7804 .parent = TYPE_PCI_DEVICE, 7805 .instance_size = sizeof(NvmeCtrl), 7806 
.instance_init = nvme_instance_init, 7807 .class_init = nvme_class_init, 7808 .interfaces = (InterfaceInfo[]) { 7809 { INTERFACE_PCIE_DEVICE }, 7810 { } 7811 }, 7812 }; 7813 7814 static const TypeInfo nvme_bus_info = { 7815 .name = TYPE_NVME_BUS, 7816 .parent = TYPE_BUS, 7817 .instance_size = sizeof(NvmeBus), 7818 }; 7819 7820 static void nvme_register_types(void) 7821 { 7822 type_register_static(&nvme_info); 7823 type_register_static(&nvme_bus_info); 7824 } 7825 7826 type_init(nvme_register_types) 7827