/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *
 *
 * Notes on coding style
 * ---------------------
 * While QEMU coding style prefers lowercase hexadecimals in constants, the
 * NVMe subsystem uses the format from the NVMe specifications in the comments
 * (i.e. 'h' suffix instead of '0x' prefix).
 *
 * Usage
 * -----
 * See docs/system/nvme.rst for extensive documentation.
 *
 * Add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
 *      -device nvme,serial=<serial>,id=<bus_name>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
 *              mdts=<N[optional]>,vsl=<N[optional]>, \
 *              zoned.zasl=<N[optional]>, \
 *              zoned.auto_transition=<on|off[optional]>, \
 *              sriov_max_vfs=<N[optional]> \
 *              sriov_vq_flexible=<N[optional]> \
 *              sriov_vi_flexible=<N[optional]> \
 *              sriov_max_vi_per_vf=<N[optional]> \
 *              sriov_max_vq_per_vf=<N[optional]> \
 *              subsys=<subsys_id>
 *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
 *              zoned=<true|false[optional]>, \
 *              subsys=<subsys_id>,shared=<true|false[optional]>, \
 *              detached=<true|false[optional]>, \
 *              zoned.zone_size=<N[optional]>, \
 *              zoned.zone_capacity=<N[optional]>, \
 *              zoned.descr_ext_size=<N[optional]>, \
 *              zoned.max_active=<N[optional]>, \
 *              zoned.max_open=<N[optional]>, \
 *              zoned.cross_read=<true|false[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By default, the
 * device will use the "v1.4 CMB scheme" - use the `legacy-cmb` parameter to
 * always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
 *
 * Enabling PMR emulation can be achieved by pointing `pmrdev` to a
 * memory-backend-file. For example:
 *      -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *       size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * The PMR will use BAR 4/5 exclusively.
 *
 * To place controller(s) and namespace(s) in a subsystem, provide the
 * nvme-subsys device as above.
 *
 * nvme subsystem device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `nqn`
 *   This parameter provides the `<nqn_id>` part of the string
 *   `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
 *   of subsystem controllers. Note that `<nqn_id>` should be unique per
 *   subsystem, but this is not enforced by QEMU. If not specified, it will
 *   default to the value of the `id` parameter (`<subsys_id>`).
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `subsys`
 *   Specifying this parameter attaches the controller to the subsystem and
 *   the SUBNQN field in the controller will report the NQN of the subsystem
 *   device. This also enables the multi controller capability represented in
 *   the Identify Controller data structure in CMIC (Controller Multi-path I/O
 *   and Namespace Sharing Capabilities).
 *
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 *
 * - `mdts`
 *   Indicates the maximum data transfer size for a command that transfers data
 *   between host-accessible memory and the controller. The value is specified
 *   as a power of two (2^n) and is in units of the minimum memory page size
 *   (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
 *
 * - `vsl`
 *   Indicates the maximum data size limit for the Verify command. Like `mdts`,
 *   this value is specified as a power of two (2^n) and is in units of the
 *   minimum memory page size (CAP.MPSMIN). The default value is 7 (i.e. 512
 *   KiB).
 *
 * - `zoned.zasl`
 *   Indicates the maximum data transfer size for the Zone Append command. Like
 *   `mdts`, the value is specified as a power of two (2^n) and is in units of
 *   the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
 *   defaulting to the value of `mdts`).
 *
 * - `zoned.auto_transition`
 *   Indicates if zones in zone state implicitly opened can be automatically
 *   transitioned to zone state closed for resource management purposes.
 *   Defaults to 'on'.
 *
 * - `sriov_max_vfs`
 *   Indicates the maximum number of PCIe virtual functions supported
 *   by the controller. The default value is 0. Specifying a non-zero value
 *   enables reporting of both SR-IOV and ARI capabilities by the NVMe device.
 *   Virtual function controllers will not report SR-IOV capability.
 *
 *   NOTE: Single Root I/O Virtualization support is experimental.
 *   All the related parameters may be subject to change.
 *
 * - `sriov_vq_flexible`
 *   Indicates the total number of flexible queue resources assignable to all
 *   the secondary controllers. Implicitly sets the number of primary
 *   controller's private resources to `(max_ioqpairs - sriov_vq_flexible)`.
 *
 * - `sriov_vi_flexible`
 *   Indicates the total number of flexible interrupt resources assignable to
 *   all the secondary controllers. Implicitly sets the number of primary
 *   controller's private resources to `(msix_qsize - sriov_vi_flexible)`.
 *
 * - `sriov_max_vi_per_vf`
 *   Indicates the maximum number of virtual interrupt resources assignable
 *   to a secondary controller. The default 0 resolves to
 *   `(sriov_vi_flexible / sriov_max_vfs)`.
 *
 * - `sriov_max_vq_per_vf`
 *   Indicates the maximum number of virtual queue resources assignable to
 *   a secondary controller. The default 0 resolves to
 *   `(sriov_vq_flexible / sriov_max_vfs)`.
 *
 * nvme namespace device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `shared`
 *   When the parent nvme device (as defined explicitly by the 'bus' parameter
 *   or implicitly by the most recently defined NvmeBus) is linked to an
 *   nvme-subsys device, the namespace will be attached to all controllers in
 *   the subsystem. If set to 'off' (the default), the namespace will remain a
 *   private namespace and may only be attached to a single controller at a
 *   time.
 *
 * - `detached`
 *   This parameter is only valid together with the `subsys` parameter.
 *   If left at the default value (`false/off`), the namespace will be
 *   attached to all controllers in the NVMe subsystem at boot-up. If set to
 *   `true/on`, the namespace will be available in the subsystem but not
 *   attached to any controllers.
 *
 * Setting `zoned` to true selects Zoned Command Set at the namespace.
 * In this case, the following namespace properties are available to configure
 * zoned operation:
 *     zoned.zone_size=<zone size in bytes, default: 128MiB>
 *         The number may be followed by K, M, G as in kilo-, mega- or giga-.
 *
 *     zoned.zone_capacity=<zone capacity in bytes, default: zone size>
 *         The value 0 (default) forces zone capacity to be the same as zone
 *         size. The value of this property may not exceed zone size.
 *
 *     zoned.descr_ext_size=<zone descriptor extension size, default 0>
 *         This value needs to be specified in 64B units. If it is zero,
 *         namespace(s) will not support zone descriptor extensions.
 *
 *     zoned.max_active=<Maximum Active Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently active zones.
 *
 *     zoned.max_open=<Maximum Open Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently open zones.
 *
 *     zoned.cross_read=<enable RAZB, default: false>
 *         Setting this property to true enables Read Across Zone Boundaries.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/hostmem.h"
#include "hw/pci/msix.h"
#include "hw/pci/pcie_sriov.h"
#include "migration/vmstate.h"

#include "nvme.h"
#include "dif.h"
#include "trace.h"

#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE 4
#define NVME_SPEC_VER 0x00010400
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 4
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1
#define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB)
#define NVME_MAX_VFS 127
#define NVME_VF_RES_GRANULARITY 1
#define NVME_VF_OFFSET 0x1
#define NVME_VF_STRIDE 1

#define NVME_GUEST_ERR(trace, fmt, ...)
\ 228 do { \ 229 (trace_##trace)(__VA_ARGS__); \ 230 qemu_log_mask(LOG_GUEST_ERROR, #trace \ 231 " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \ 232 } while (0) 233 234 static const bool nvme_feature_support[NVME_FID_MAX] = { 235 [NVME_ARBITRATION] = true, 236 [NVME_POWER_MANAGEMENT] = true, 237 [NVME_TEMPERATURE_THRESHOLD] = true, 238 [NVME_ERROR_RECOVERY] = true, 239 [NVME_VOLATILE_WRITE_CACHE] = true, 240 [NVME_NUMBER_OF_QUEUES] = true, 241 [NVME_INTERRUPT_COALESCING] = true, 242 [NVME_INTERRUPT_VECTOR_CONF] = true, 243 [NVME_WRITE_ATOMICITY] = true, 244 [NVME_ASYNCHRONOUS_EVENT_CONF] = true, 245 [NVME_TIMESTAMP] = true, 246 [NVME_HOST_BEHAVIOR_SUPPORT] = true, 247 [NVME_COMMAND_SET_PROFILE] = true, 248 [NVME_FDP_MODE] = true, 249 [NVME_FDP_EVENTS] = true, 250 }; 251 252 static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { 253 [NVME_TEMPERATURE_THRESHOLD] = NVME_FEAT_CAP_CHANGE, 254 [NVME_ERROR_RECOVERY] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS, 255 [NVME_VOLATILE_WRITE_CACHE] = NVME_FEAT_CAP_CHANGE, 256 [NVME_NUMBER_OF_QUEUES] = NVME_FEAT_CAP_CHANGE, 257 [NVME_ASYNCHRONOUS_EVENT_CONF] = NVME_FEAT_CAP_CHANGE, 258 [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE, 259 [NVME_HOST_BEHAVIOR_SUPPORT] = NVME_FEAT_CAP_CHANGE, 260 [NVME_COMMAND_SET_PROFILE] = NVME_FEAT_CAP_CHANGE, 261 [NVME_FDP_MODE] = NVME_FEAT_CAP_CHANGE, 262 [NVME_FDP_EVENTS] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS, 263 }; 264 265 static const uint32_t nvme_cse_acs[256] = { 266 [NVME_ADM_CMD_DELETE_SQ] = NVME_CMD_EFF_CSUPP, 267 [NVME_ADM_CMD_CREATE_SQ] = NVME_CMD_EFF_CSUPP, 268 [NVME_ADM_CMD_GET_LOG_PAGE] = NVME_CMD_EFF_CSUPP, 269 [NVME_ADM_CMD_DELETE_CQ] = NVME_CMD_EFF_CSUPP, 270 [NVME_ADM_CMD_CREATE_CQ] = NVME_CMD_EFF_CSUPP, 271 [NVME_ADM_CMD_IDENTIFY] = NVME_CMD_EFF_CSUPP, 272 [NVME_ADM_CMD_ABORT] = NVME_CMD_EFF_CSUPP, 273 [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP, 274 [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP, 275 [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP, 276 [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC, 277 [NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP, 278 [NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP, 279 [NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 280 [NVME_ADM_CMD_DIRECTIVE_RECV] = NVME_CMD_EFF_CSUPP, 281 [NVME_ADM_CMD_DIRECTIVE_SEND] = NVME_CMD_EFF_CSUPP, 282 }; 283 284 static const uint32_t nvme_cse_iocs_none[256]; 285 286 static const uint32_t nvme_cse_iocs_nvm[256] = { 287 [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 288 [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 289 [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 290 [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP, 291 [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 292 [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, 293 [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 294 [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, 295 [NVME_CMD_IO_MGMT_RECV] = NVME_CMD_EFF_CSUPP, 296 [NVME_CMD_IO_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 297 }; 298 299 static const uint32_t nvme_cse_iocs_zoned[256] = { 300 [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 301 [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 302 [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 303 [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP, 304 [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 305 [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, 306 [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 307 [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, 308 
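    /*
     * In addition to the NVM command set commands above, the Zoned Namespace
     * Command Set adds the zone append and zone management commands below.
     */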
[NVME_CMD_ZONE_APPEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 309 [NVME_CMD_ZONE_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, 310 [NVME_CMD_ZONE_MGMT_RECV] = NVME_CMD_EFF_CSUPP, 311 }; 312 313 static void nvme_process_sq(void *opaque); 314 static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst); 315 static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n); 316 317 static uint16_t nvme_sqid(NvmeRequest *req) 318 { 319 return le16_to_cpu(req->sq->sqid); 320 } 321 322 static inline uint16_t nvme_make_pid(NvmeNamespace *ns, uint16_t rg, 323 uint16_t ph) 324 { 325 uint16_t rgif = ns->endgrp->fdp.rgif; 326 327 if (!rgif) { 328 return ph; 329 } 330 331 return (rg << (16 - rgif)) | ph; 332 } 333 334 static inline bool nvme_ph_valid(NvmeNamespace *ns, uint16_t ph) 335 { 336 return ph < ns->fdp.nphs; 337 } 338 339 static inline bool nvme_rg_valid(NvmeEnduranceGroup *endgrp, uint16_t rg) 340 { 341 return rg < endgrp->fdp.nrg; 342 } 343 344 static inline uint16_t nvme_pid2ph(NvmeNamespace *ns, uint16_t pid) 345 { 346 uint16_t rgif = ns->endgrp->fdp.rgif; 347 348 if (!rgif) { 349 return pid; 350 } 351 352 return pid & ((1 << (15 - rgif)) - 1); 353 } 354 355 static inline uint16_t nvme_pid2rg(NvmeNamespace *ns, uint16_t pid) 356 { 357 uint16_t rgif = ns->endgrp->fdp.rgif; 358 359 if (!rgif) { 360 return 0; 361 } 362 363 return pid >> (16 - rgif); 364 } 365 366 static inline bool nvme_parse_pid(NvmeNamespace *ns, uint16_t pid, 367 uint16_t *ph, uint16_t *rg) 368 { 369 *rg = nvme_pid2rg(ns, pid); 370 *ph = nvme_pid2ph(ns, pid); 371 372 return nvme_ph_valid(ns, *ph) && nvme_rg_valid(ns->endgrp, *rg); 373 } 374 375 static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone, 376 NvmeZoneState state) 377 { 378 if (QTAILQ_IN_USE(zone, entry)) { 379 switch (nvme_get_zone_state(zone)) { 380 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 381 QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry); 382 break; 383 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 384 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); 385 break; 386 case NVME_ZONE_STATE_CLOSED: 387 QTAILQ_REMOVE(&ns->closed_zones, zone, entry); 388 break; 389 case NVME_ZONE_STATE_FULL: 390 QTAILQ_REMOVE(&ns->full_zones, zone, entry); 391 default: 392 ; 393 } 394 } 395 396 nvme_set_zone_state(zone, state); 397 398 switch (state) { 399 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 400 QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry); 401 break; 402 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 403 QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry); 404 break; 405 case NVME_ZONE_STATE_CLOSED: 406 QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry); 407 break; 408 case NVME_ZONE_STATE_FULL: 409 QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry); 410 case NVME_ZONE_STATE_READ_ONLY: 411 break; 412 default: 413 zone->d.za = 0; 414 } 415 } 416 417 static uint16_t nvme_zns_check_resources(NvmeNamespace *ns, uint32_t act, 418 uint32_t opn, uint32_t zrwa) 419 { 420 if (ns->params.max_active_zones != 0 && 421 ns->nr_active_zones + act > ns->params.max_active_zones) { 422 trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones); 423 return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR; 424 } 425 426 if (ns->params.max_open_zones != 0 && 427 ns->nr_open_zones + opn > ns->params.max_open_zones) { 428 trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones); 429 return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR; 430 } 431 432 if (zrwa > ns->zns.numzrwa) { 433 return NVME_NOZRWA | NVME_DNR; 434 } 435 436 return NVME_SUCCESS; 437 } 438 439 /* 440 * Check if we can open a zone without 
exceeding open/active limits. 441 * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5). 442 */ 443 static uint16_t nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn) 444 { 445 return nvme_zns_check_resources(ns, act, opn, 0); 446 } 447 448 static NvmeFdpEvent *nvme_fdp_alloc_event(NvmeCtrl *n, NvmeFdpEventBuffer *ebuf) 449 { 450 NvmeFdpEvent *ret = NULL; 451 bool is_full = ebuf->next == ebuf->start && ebuf->nelems; 452 453 ret = &ebuf->events[ebuf->next++]; 454 if (unlikely(ebuf->next == NVME_FDP_MAX_EVENTS)) { 455 ebuf->next = 0; 456 } 457 if (is_full) { 458 ebuf->start = ebuf->next; 459 } else { 460 ebuf->nelems++; 461 } 462 463 memset(ret, 0, sizeof(NvmeFdpEvent)); 464 ret->timestamp = nvme_get_timestamp(n); 465 466 return ret; 467 } 468 469 static inline int log_event(NvmeRuHandle *ruh, uint8_t event_type) 470 { 471 return (ruh->event_filter >> nvme_fdp_evf_shifts[event_type]) & 0x1; 472 } 473 474 static bool nvme_update_ruh(NvmeCtrl *n, NvmeNamespace *ns, uint16_t pid) 475 { 476 NvmeEnduranceGroup *endgrp = ns->endgrp; 477 NvmeRuHandle *ruh; 478 NvmeReclaimUnit *ru; 479 NvmeFdpEvent *e = NULL; 480 uint16_t ph, rg, ruhid; 481 482 if (!nvme_parse_pid(ns, pid, &ph, &rg)) { 483 return false; 484 } 485 486 ruhid = ns->fdp.phs[ph]; 487 488 ruh = &endgrp->fdp.ruhs[ruhid]; 489 ru = &ruh->rus[rg]; 490 491 if (ru->ruamw) { 492 if (log_event(ruh, FDP_EVT_RU_NOT_FULLY_WRITTEN)) { 493 e = nvme_fdp_alloc_event(n, &endgrp->fdp.host_events); 494 e->type = FDP_EVT_RU_NOT_FULLY_WRITTEN; 495 e->flags = FDPEF_PIV | FDPEF_NSIDV | FDPEF_LV; 496 e->pid = cpu_to_le16(pid); 497 e->nsid = cpu_to_le32(ns->params.nsid); 498 e->rgid = cpu_to_le16(rg); 499 e->ruhid = cpu_to_le16(ruhid); 500 } 501 502 /* log (eventual) GC overhead of prematurely swapping the RU */ 503 nvme_fdp_stat_inc(&endgrp->fdp.mbmw, nvme_l2b(ns, ru->ruamw)); 504 } 505 506 ru->ruamw = ruh->ruamw; 507 508 return true; 509 } 510 511 static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr) 512 { 513 hwaddr hi, lo; 514 515 if (!n->cmb.cmse) { 516 return false; 517 } 518 519 lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; 520 hi = lo + int128_get64(n->cmb.mem.size); 521 522 return addr >= lo && addr < hi; 523 } 524 525 static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr) 526 { 527 hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; 528 return &n->cmb.buf[addr - base]; 529 } 530 531 static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr) 532 { 533 hwaddr hi; 534 535 if (!n->pmr.cmse) { 536 return false; 537 } 538 539 hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size); 540 541 return addr >= n->pmr.cba && addr < hi; 542 } 543 544 static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr) 545 { 546 return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba); 547 } 548 549 static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr) 550 { 551 hwaddr hi, lo; 552 553 /* 554 * The purpose of this check is to guard against invalid "local" access to 555 * the iomem (i.e. controller registers). Thus, we check against the range 556 * covered by the 'bar0' MemoryRegion since that is currently composed of 557 * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however, 558 * that if the device model is ever changed to allow the CMB to be located 559 * in BAR0 as well, then this must be changed. 
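 * As currently modelled, the CMB lives in BAR 2 (NVME_CMB_BIR) and the PMR in
 * BAR 4 (NVME_PMR_BIR), so neither overlaps the range checked here.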
560 */ 561 lo = n->bar0.addr; 562 hi = lo + int128_get64(n->bar0.size); 563 564 return addr >= lo && addr < hi; 565 } 566 567 static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) 568 { 569 hwaddr hi = addr + size - 1; 570 if (hi < addr) { 571 return 1; 572 } 573 574 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { 575 memcpy(buf, nvme_addr_to_cmb(n, addr), size); 576 return 0; 577 } 578 579 if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { 580 memcpy(buf, nvme_addr_to_pmr(n, addr), size); 581 return 0; 582 } 583 584 return pci_dma_read(PCI_DEVICE(n), addr, buf, size); 585 } 586 587 static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size) 588 { 589 hwaddr hi = addr + size - 1; 590 if (hi < addr) { 591 return 1; 592 } 593 594 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { 595 memcpy(nvme_addr_to_cmb(n, addr), buf, size); 596 return 0; 597 } 598 599 if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { 600 memcpy(nvme_addr_to_pmr(n, addr), buf, size); 601 return 0; 602 } 603 604 return pci_dma_write(PCI_DEVICE(n), addr, buf, size); 605 } 606 607 static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid) 608 { 609 return nsid && 610 (nsid == NVME_NSID_BROADCAST || nsid <= NVME_MAX_NAMESPACES); 611 } 612 613 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid) 614 { 615 return sqid < n->conf_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1; 616 } 617 618 static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid) 619 { 620 return cqid < n->conf_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1; 621 } 622 623 static void nvme_inc_cq_tail(NvmeCQueue *cq) 624 { 625 cq->tail++; 626 if (cq->tail >= cq->size) { 627 cq->tail = 0; 628 cq->phase = !cq->phase; 629 } 630 } 631 632 static void nvme_inc_sq_head(NvmeSQueue *sq) 633 { 634 sq->head = (sq->head + 1) % sq->size; 635 } 636 637 static uint8_t nvme_cq_full(NvmeCQueue *cq) 638 { 639 return (cq->tail + 1) % cq->size == cq->head; 640 } 641 642 static uint8_t nvme_sq_empty(NvmeSQueue *sq) 643 { 644 return sq->head == sq->tail; 645 } 646 647 static void nvme_irq_check(NvmeCtrl *n) 648 { 649 PCIDevice *pci = PCI_DEVICE(n); 650 uint32_t intms = ldl_le_p(&n->bar.intms); 651 652 if (msix_enabled(pci)) { 653 return; 654 } 655 if (~intms & n->irq_status) { 656 pci_irq_assert(pci); 657 } else { 658 pci_irq_deassert(pci); 659 } 660 } 661 662 static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq) 663 { 664 PCIDevice *pci = PCI_DEVICE(n); 665 666 if (cq->irq_enabled) { 667 if (msix_enabled(pci)) { 668 trace_pci_nvme_irq_msix(cq->vector); 669 msix_notify(pci, cq->vector); 670 } else { 671 trace_pci_nvme_irq_pin(); 672 assert(cq->vector < 32); 673 n->irq_status |= 1 << cq->vector; 674 nvme_irq_check(n); 675 } 676 } else { 677 trace_pci_nvme_irq_masked(); 678 } 679 } 680 681 static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq) 682 { 683 if (cq->irq_enabled) { 684 if (msix_enabled(PCI_DEVICE(n))) { 685 return; 686 } else { 687 assert(cq->vector < 32); 688 if (!n->cq_pending) { 689 n->irq_status &= ~(1 << cq->vector); 690 } 691 nvme_irq_check(n); 692 } 693 } 694 } 695 696 static void nvme_req_clear(NvmeRequest *req) 697 { 698 req->ns = NULL; 699 req->opaque = NULL; 700 req->aiocb = NULL; 701 memset(&req->cqe, 0x0, sizeof(req->cqe)); 702 req->status = NVME_SUCCESS; 703 } 704 705 static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma) 706 { 707 if (dma) { 708 pci_dma_sglist_init(&sg->qsg, PCI_DEVICE(n), 0); 709 sg->flags = NVME_SG_DMA; 710 } else { 711 
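        /*
         * The address does not need DMA; it targets the CMB or PMR, which are
         * directly accessible host buffers, so collect host pointers in an
         * iovec instead of a scatter/gather list.
         */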
qemu_iovec_init(&sg->iov, 0); 712 } 713 714 sg->flags |= NVME_SG_ALLOC; 715 } 716 717 static inline void nvme_sg_unmap(NvmeSg *sg) 718 { 719 if (!(sg->flags & NVME_SG_ALLOC)) { 720 return; 721 } 722 723 if (sg->flags & NVME_SG_DMA) { 724 qemu_sglist_destroy(&sg->qsg); 725 } else { 726 qemu_iovec_destroy(&sg->iov); 727 } 728 729 memset(sg, 0x0, sizeof(*sg)); 730 } 731 732 /* 733 * When metadata is transfered as extended LBAs, the DPTR mapped into `sg` 734 * holds both data and metadata. This function splits the data and metadata 735 * into two separate QSG/IOVs. 736 */ 737 static void nvme_sg_split(NvmeSg *sg, NvmeNamespace *ns, NvmeSg *data, 738 NvmeSg *mdata) 739 { 740 NvmeSg *dst = data; 741 uint32_t trans_len, count = ns->lbasz; 742 uint64_t offset = 0; 743 bool dma = sg->flags & NVME_SG_DMA; 744 size_t sge_len; 745 size_t sg_len = dma ? sg->qsg.size : sg->iov.size; 746 int sg_idx = 0; 747 748 assert(sg->flags & NVME_SG_ALLOC); 749 750 while (sg_len) { 751 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; 752 753 trans_len = MIN(sg_len, count); 754 trans_len = MIN(trans_len, sge_len - offset); 755 756 if (dst) { 757 if (dma) { 758 qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset, 759 trans_len); 760 } else { 761 qemu_iovec_add(&dst->iov, 762 sg->iov.iov[sg_idx].iov_base + offset, 763 trans_len); 764 } 765 } 766 767 sg_len -= trans_len; 768 count -= trans_len; 769 offset += trans_len; 770 771 if (count == 0) { 772 dst = (dst == data) ? mdata : data; 773 count = (dst == data) ? ns->lbasz : ns->lbaf.ms; 774 } 775 776 if (sge_len == offset) { 777 offset = 0; 778 sg_idx++; 779 } 780 } 781 } 782 783 static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr, 784 size_t len) 785 { 786 if (!len) { 787 return NVME_SUCCESS; 788 } 789 790 trace_pci_nvme_map_addr_cmb(addr, len); 791 792 if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) { 793 return NVME_DATA_TRAS_ERROR; 794 } 795 796 qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len); 797 798 return NVME_SUCCESS; 799 } 800 801 static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr, 802 size_t len) 803 { 804 if (!len) { 805 return NVME_SUCCESS; 806 } 807 808 if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) { 809 return NVME_DATA_TRAS_ERROR; 810 } 811 812 qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len); 813 814 return NVME_SUCCESS; 815 } 816 817 static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len) 818 { 819 bool cmb = false, pmr = false; 820 821 if (!len) { 822 return NVME_SUCCESS; 823 } 824 825 trace_pci_nvme_map_addr(addr, len); 826 827 if (nvme_addr_is_iomem(n, addr)) { 828 return NVME_DATA_TRAS_ERROR; 829 } 830 831 if (nvme_addr_is_cmb(n, addr)) { 832 cmb = true; 833 } else if (nvme_addr_is_pmr(n, addr)) { 834 pmr = true; 835 } 836 837 if (cmb || pmr) { 838 if (sg->flags & NVME_SG_DMA) { 839 return NVME_INVALID_USE_OF_CMB | NVME_DNR; 840 } 841 842 if (sg->iov.niov + 1 > IOV_MAX) { 843 goto max_mappings_exceeded; 844 } 845 846 if (cmb) { 847 return nvme_map_addr_cmb(n, &sg->iov, addr, len); 848 } else { 849 return nvme_map_addr_pmr(n, &sg->iov, addr, len); 850 } 851 } 852 853 if (!(sg->flags & NVME_SG_DMA)) { 854 return NVME_INVALID_USE_OF_CMB | NVME_DNR; 855 } 856 857 if (sg->qsg.nsg + 1 > IOV_MAX) { 858 goto max_mappings_exceeded; 859 } 860 861 qemu_sglist_add(&sg->qsg, addr, len); 862 863 return NVME_SUCCESS; 864 865 max_mappings_exceeded: 866 NVME_GUEST_ERR(pci_nvme_ub_too_many_mappings, 867 
"number of mappings exceed 1024"); 868 return NVME_INTERNAL_DEV_ERROR | NVME_DNR; 869 } 870 871 static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr) 872 { 873 return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr)); 874 } 875 876 static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1, 877 uint64_t prp2, uint32_t len) 878 { 879 hwaddr trans_len = n->page_size - (prp1 % n->page_size); 880 trans_len = MIN(len, trans_len); 881 int num_prps = (len >> n->page_bits) + 1; 882 uint16_t status; 883 int ret; 884 885 trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps); 886 887 nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1)); 888 889 status = nvme_map_addr(n, sg, prp1, trans_len); 890 if (status) { 891 goto unmap; 892 } 893 894 len -= trans_len; 895 if (len) { 896 if (len > n->page_size) { 897 uint64_t prp_list[n->max_prp_ents]; 898 uint32_t nents, prp_trans; 899 int i = 0; 900 901 /* 902 * The first PRP list entry, pointed to by PRP2 may contain offset. 903 * Hence, we need to calculate the number of entries in based on 904 * that offset. 905 */ 906 nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3; 907 prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t); 908 ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans); 909 if (ret) { 910 trace_pci_nvme_err_addr_read(prp2); 911 status = NVME_DATA_TRAS_ERROR; 912 goto unmap; 913 } 914 while (len != 0) { 915 uint64_t prp_ent = le64_to_cpu(prp_list[i]); 916 917 if (i == nents - 1 && len > n->page_size) { 918 if (unlikely(prp_ent & (n->page_size - 1))) { 919 trace_pci_nvme_err_invalid_prplist_ent(prp_ent); 920 status = NVME_INVALID_PRP_OFFSET | NVME_DNR; 921 goto unmap; 922 } 923 924 i = 0; 925 nents = (len + n->page_size - 1) >> n->page_bits; 926 nents = MIN(nents, n->max_prp_ents); 927 prp_trans = nents * sizeof(uint64_t); 928 ret = nvme_addr_read(n, prp_ent, (void *)prp_list, 929 prp_trans); 930 if (ret) { 931 trace_pci_nvme_err_addr_read(prp_ent); 932 status = NVME_DATA_TRAS_ERROR; 933 goto unmap; 934 } 935 prp_ent = le64_to_cpu(prp_list[i]); 936 } 937 938 if (unlikely(prp_ent & (n->page_size - 1))) { 939 trace_pci_nvme_err_invalid_prplist_ent(prp_ent); 940 status = NVME_INVALID_PRP_OFFSET | NVME_DNR; 941 goto unmap; 942 } 943 944 trans_len = MIN(len, n->page_size); 945 status = nvme_map_addr(n, sg, prp_ent, trans_len); 946 if (status) { 947 goto unmap; 948 } 949 950 len -= trans_len; 951 i++; 952 } 953 } else { 954 if (unlikely(prp2 & (n->page_size - 1))) { 955 trace_pci_nvme_err_invalid_prp2_align(prp2); 956 status = NVME_INVALID_PRP_OFFSET | NVME_DNR; 957 goto unmap; 958 } 959 status = nvme_map_addr(n, sg, prp2, len); 960 if (status) { 961 goto unmap; 962 } 963 } 964 } 965 966 return NVME_SUCCESS; 967 968 unmap: 969 nvme_sg_unmap(sg); 970 return status; 971 } 972 973 /* 974 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the 975 * number of bytes mapped in len. 
976 */ 977 static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg, 978 NvmeSglDescriptor *segment, uint64_t nsgld, 979 size_t *len, NvmeCmd *cmd) 980 { 981 dma_addr_t addr, trans_len; 982 uint32_t dlen; 983 uint16_t status; 984 985 for (int i = 0; i < nsgld; i++) { 986 uint8_t type = NVME_SGL_TYPE(segment[i].type); 987 988 switch (type) { 989 case NVME_SGL_DESCR_TYPE_DATA_BLOCK: 990 break; 991 case NVME_SGL_DESCR_TYPE_SEGMENT: 992 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT: 993 return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR; 994 default: 995 return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR; 996 } 997 998 dlen = le32_to_cpu(segment[i].len); 999 1000 if (!dlen) { 1001 continue; 1002 } 1003 1004 if (*len == 0) { 1005 /* 1006 * All data has been mapped, but the SGL contains additional 1007 * segments and/or descriptors. The controller might accept 1008 * ignoring the rest of the SGL. 1009 */ 1010 uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls); 1011 if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) { 1012 break; 1013 } 1014 1015 trace_pci_nvme_err_invalid_sgl_excess_length(dlen); 1016 return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; 1017 } 1018 1019 trans_len = MIN(*len, dlen); 1020 1021 addr = le64_to_cpu(segment[i].addr); 1022 1023 if (UINT64_MAX - addr < dlen) { 1024 return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; 1025 } 1026 1027 status = nvme_map_addr(n, sg, addr, trans_len); 1028 if (status) { 1029 return status; 1030 } 1031 1032 *len -= trans_len; 1033 } 1034 1035 return NVME_SUCCESS; 1036 } 1037 1038 static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl, 1039 size_t len, NvmeCmd *cmd) 1040 { 1041 /* 1042 * Read the segment in chunks of 256 descriptors (one 4k page) to avoid 1043 * dynamically allocating a potentially huge SGL. The spec allows the SGL 1044 * to be larger (as in number of bytes required to describe the SGL 1045 * descriptors and segment chain) than the command transfer size, so it is 1046 * not bounded by MDTS. 1047 */ 1048 const int SEG_CHUNK_SIZE = 256; 1049 1050 NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld; 1051 uint64_t nsgld; 1052 uint32_t seg_len; 1053 uint16_t status; 1054 hwaddr addr; 1055 int ret; 1056 1057 sgld = &sgl; 1058 addr = le64_to_cpu(sgl.addr); 1059 1060 trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl.type), len); 1061 1062 nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr)); 1063 1064 /* 1065 * If the entire transfer can be described with a single data block it can 1066 * be mapped directly. 
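 * This is the case when the first descriptor passed in `sgl` is itself a Data
 * Block descriptor rather than a (Last) Segment descriptor.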
1067 */ 1068 if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) { 1069 status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd); 1070 if (status) { 1071 goto unmap; 1072 } 1073 1074 goto out; 1075 } 1076 1077 for (;;) { 1078 switch (NVME_SGL_TYPE(sgld->type)) { 1079 case NVME_SGL_DESCR_TYPE_SEGMENT: 1080 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT: 1081 break; 1082 default: 1083 return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; 1084 } 1085 1086 seg_len = le32_to_cpu(sgld->len); 1087 1088 /* check the length of the (Last) Segment descriptor */ 1089 if (!seg_len || seg_len & 0xf) { 1090 return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; 1091 } 1092 1093 if (UINT64_MAX - addr < seg_len) { 1094 return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; 1095 } 1096 1097 nsgld = seg_len / sizeof(NvmeSglDescriptor); 1098 1099 while (nsgld > SEG_CHUNK_SIZE) { 1100 if (nvme_addr_read(n, addr, segment, sizeof(segment))) { 1101 trace_pci_nvme_err_addr_read(addr); 1102 status = NVME_DATA_TRAS_ERROR; 1103 goto unmap; 1104 } 1105 1106 status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE, 1107 &len, cmd); 1108 if (status) { 1109 goto unmap; 1110 } 1111 1112 nsgld -= SEG_CHUNK_SIZE; 1113 addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor); 1114 } 1115 1116 ret = nvme_addr_read(n, addr, segment, nsgld * 1117 sizeof(NvmeSglDescriptor)); 1118 if (ret) { 1119 trace_pci_nvme_err_addr_read(addr); 1120 status = NVME_DATA_TRAS_ERROR; 1121 goto unmap; 1122 } 1123 1124 last_sgld = &segment[nsgld - 1]; 1125 1126 /* 1127 * If the segment ends with a Data Block, then we are done. 1128 */ 1129 if (NVME_SGL_TYPE(last_sgld->type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) { 1130 status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd); 1131 if (status) { 1132 goto unmap; 1133 } 1134 1135 goto out; 1136 } 1137 1138 /* 1139 * If the last descriptor was not a Data Block, then the current 1140 * segment must not be a Last Segment. 1141 */ 1142 if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) { 1143 status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; 1144 goto unmap; 1145 } 1146 1147 sgld = last_sgld; 1148 addr = le64_to_cpu(sgld->addr); 1149 1150 /* 1151 * Do not map the last descriptor; it will be a Segment or Last Segment 1152 * descriptor and is handled by the next iteration. 
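 * (Mapping nsgld - 1 descriptors below therefore excludes it.)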
1153 */ 1154 status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd); 1155 if (status) { 1156 goto unmap; 1157 } 1158 } 1159 1160 out: 1161 /* if there is any residual left in len, the SGL was too short */ 1162 if (len) { 1163 status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR; 1164 goto unmap; 1165 } 1166 1167 return NVME_SUCCESS; 1168 1169 unmap: 1170 nvme_sg_unmap(sg); 1171 return status; 1172 } 1173 1174 uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len, 1175 NvmeCmd *cmd) 1176 { 1177 uint64_t prp1, prp2; 1178 1179 switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) { 1180 case NVME_PSDT_PRP: 1181 prp1 = le64_to_cpu(cmd->dptr.prp1); 1182 prp2 = le64_to_cpu(cmd->dptr.prp2); 1183 1184 return nvme_map_prp(n, sg, prp1, prp2, len); 1185 case NVME_PSDT_SGL_MPTR_CONTIGUOUS: 1186 case NVME_PSDT_SGL_MPTR_SGL: 1187 return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd); 1188 default: 1189 return NVME_INVALID_FIELD; 1190 } 1191 } 1192 1193 static uint16_t nvme_map_mptr(NvmeCtrl *n, NvmeSg *sg, size_t len, 1194 NvmeCmd *cmd) 1195 { 1196 int psdt = NVME_CMD_FLAGS_PSDT(cmd->flags); 1197 hwaddr mptr = le64_to_cpu(cmd->mptr); 1198 uint16_t status; 1199 1200 if (psdt == NVME_PSDT_SGL_MPTR_SGL) { 1201 NvmeSglDescriptor sgl; 1202 1203 if (nvme_addr_read(n, mptr, &sgl, sizeof(sgl))) { 1204 return NVME_DATA_TRAS_ERROR; 1205 } 1206 1207 status = nvme_map_sgl(n, sg, sgl, len, cmd); 1208 if (status && (status & 0x7ff) == NVME_DATA_SGL_LEN_INVALID) { 1209 status = NVME_MD_SGL_LEN_INVALID | NVME_DNR; 1210 } 1211 1212 return status; 1213 } 1214 1215 nvme_sg_init(n, sg, nvme_addr_is_dma(n, mptr)); 1216 status = nvme_map_addr(n, sg, mptr, len); 1217 if (status) { 1218 nvme_sg_unmap(sg); 1219 } 1220 1221 return status; 1222 } 1223 1224 static uint16_t nvme_map_data(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) 1225 { 1226 NvmeNamespace *ns = req->ns; 1227 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 1228 bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); 1229 bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); 1230 size_t len = nvme_l2b(ns, nlb); 1231 uint16_t status; 1232 1233 if (nvme_ns_ext(ns) && 1234 !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { 1235 NvmeSg sg; 1236 1237 len += nvme_m2b(ns, nlb); 1238 1239 status = nvme_map_dptr(n, &sg, len, &req->cmd); 1240 if (status) { 1241 return status; 1242 } 1243 1244 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); 1245 nvme_sg_split(&sg, ns, &req->sg, NULL); 1246 nvme_sg_unmap(&sg); 1247 1248 return NVME_SUCCESS; 1249 } 1250 1251 return nvme_map_dptr(n, &req->sg, len, &req->cmd); 1252 } 1253 1254 static uint16_t nvme_map_mdata(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) 1255 { 1256 NvmeNamespace *ns = req->ns; 1257 size_t len = nvme_m2b(ns, nlb); 1258 uint16_t status; 1259 1260 if (nvme_ns_ext(ns)) { 1261 NvmeSg sg; 1262 1263 len += nvme_l2b(ns, nlb); 1264 1265 status = nvme_map_dptr(n, &sg, len, &req->cmd); 1266 if (status) { 1267 return status; 1268 } 1269 1270 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); 1271 nvme_sg_split(&sg, ns, NULL, &req->sg); 1272 nvme_sg_unmap(&sg); 1273 1274 return NVME_SUCCESS; 1275 } 1276 1277 return nvme_map_mptr(n, &req->sg, len, &req->cmd); 1278 } 1279 1280 static uint16_t nvme_tx_interleaved(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, 1281 uint32_t len, uint32_t bytes, 1282 int32_t skip_bytes, int64_t offset, 1283 NvmeTxDirection dir) 1284 { 1285 hwaddr addr; 1286 uint32_t trans_len, count = bytes; 1287 bool dma = sg->flags & NVME_SG_DMA; 1288 int64_t sge_len; 1289 int sg_idx = 0; 1290 int ret; 1291 1292 
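    /*
     * The mapped SGL/IOV covers data and metadata interleaved as extended
     * LBAs; copy `len` bytes to or from the linear buffer at `ptr` in runs of
     * `bytes`, skipping `skip_bytes` in the mapping after each run.
     */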
assert(sg->flags & NVME_SG_ALLOC); 1293 1294 while (len) { 1295 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; 1296 1297 if (sge_len - offset < 0) { 1298 offset -= sge_len; 1299 sg_idx++; 1300 continue; 1301 } 1302 1303 if (sge_len == offset) { 1304 offset = 0; 1305 sg_idx++; 1306 continue; 1307 } 1308 1309 trans_len = MIN(len, count); 1310 trans_len = MIN(trans_len, sge_len - offset); 1311 1312 if (dma) { 1313 addr = sg->qsg.sg[sg_idx].base + offset; 1314 } else { 1315 addr = (hwaddr)(uintptr_t)sg->iov.iov[sg_idx].iov_base + offset; 1316 } 1317 1318 if (dir == NVME_TX_DIRECTION_TO_DEVICE) { 1319 ret = nvme_addr_read(n, addr, ptr, trans_len); 1320 } else { 1321 ret = nvme_addr_write(n, addr, ptr, trans_len); 1322 } 1323 1324 if (ret) { 1325 return NVME_DATA_TRAS_ERROR; 1326 } 1327 1328 ptr += trans_len; 1329 len -= trans_len; 1330 count -= trans_len; 1331 offset += trans_len; 1332 1333 if (count == 0) { 1334 count = bytes; 1335 offset += skip_bytes; 1336 } 1337 } 1338 1339 return NVME_SUCCESS; 1340 } 1341 1342 static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, void *ptr, uint32_t len, 1343 NvmeTxDirection dir) 1344 { 1345 assert(sg->flags & NVME_SG_ALLOC); 1346 1347 if (sg->flags & NVME_SG_DMA) { 1348 const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; 1349 dma_addr_t residual; 1350 1351 if (dir == NVME_TX_DIRECTION_TO_DEVICE) { 1352 dma_buf_write(ptr, len, &residual, &sg->qsg, attrs); 1353 } else { 1354 dma_buf_read(ptr, len, &residual, &sg->qsg, attrs); 1355 } 1356 1357 if (unlikely(residual)) { 1358 trace_pci_nvme_err_invalid_dma(); 1359 return NVME_INVALID_FIELD | NVME_DNR; 1360 } 1361 } else { 1362 size_t bytes; 1363 1364 if (dir == NVME_TX_DIRECTION_TO_DEVICE) { 1365 bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len); 1366 } else { 1367 bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len); 1368 } 1369 1370 if (unlikely(bytes != len)) { 1371 trace_pci_nvme_err_invalid_dma(); 1372 return NVME_INVALID_FIELD | NVME_DNR; 1373 } 1374 } 1375 1376 return NVME_SUCCESS; 1377 } 1378 1379 static inline uint16_t nvme_c2h(NvmeCtrl *n, void *ptr, uint32_t len, 1380 NvmeRequest *req) 1381 { 1382 uint16_t status; 1383 1384 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); 1385 if (status) { 1386 return status; 1387 } 1388 1389 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE); 1390 } 1391 1392 static inline uint16_t nvme_h2c(NvmeCtrl *n, void *ptr, uint32_t len, 1393 NvmeRequest *req) 1394 { 1395 uint16_t status; 1396 1397 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); 1398 if (status) { 1399 return status; 1400 } 1401 1402 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE); 1403 } 1404 1405 uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len, 1406 NvmeTxDirection dir, NvmeRequest *req) 1407 { 1408 NvmeNamespace *ns = req->ns; 1409 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 1410 bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); 1411 bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); 1412 1413 if (nvme_ns_ext(ns) && 1414 !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { 1415 return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbasz, 1416 ns->lbaf.ms, 0, dir); 1417 } 1418 1419 return nvme_tx(n, &req->sg, ptr, len, dir); 1420 } 1421 1422 uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len, 1423 NvmeTxDirection dir, NvmeRequest *req) 1424 { 1425 NvmeNamespace *ns = req->ns; 1426 uint16_t status; 1427 1428 if (nvme_ns_ext(ns)) { 1429 return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbaf.ms, 1430 
ns->lbasz, ns->lbasz, dir); 1431 } 1432 1433 nvme_sg_unmap(&req->sg); 1434 1435 status = nvme_map_mptr(n, &req->sg, len, &req->cmd); 1436 if (status) { 1437 return status; 1438 } 1439 1440 return nvme_tx(n, &req->sg, ptr, len, dir); 1441 } 1442 1443 static inline void nvme_blk_read(BlockBackend *blk, int64_t offset, 1444 uint32_t align, BlockCompletionFunc *cb, 1445 NvmeRequest *req) 1446 { 1447 assert(req->sg.flags & NVME_SG_ALLOC); 1448 1449 if (req->sg.flags & NVME_SG_DMA) { 1450 req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, align, cb, req); 1451 } else { 1452 req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req); 1453 } 1454 } 1455 1456 static inline void nvme_blk_write(BlockBackend *blk, int64_t offset, 1457 uint32_t align, BlockCompletionFunc *cb, 1458 NvmeRequest *req) 1459 { 1460 assert(req->sg.flags & NVME_SG_ALLOC); 1461 1462 if (req->sg.flags & NVME_SG_DMA) { 1463 req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, align, cb, req); 1464 } else { 1465 req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req); 1466 } 1467 } 1468 1469 static void nvme_update_cq_eventidx(const NvmeCQueue *cq) 1470 { 1471 trace_pci_nvme_update_cq_eventidx(cq->cqid, cq->head); 1472 1473 stl_le_pci_dma(PCI_DEVICE(cq->ctrl), cq->ei_addr, cq->head, 1474 MEMTXATTRS_UNSPECIFIED); 1475 } 1476 1477 static void nvme_update_cq_head(NvmeCQueue *cq) 1478 { 1479 ldl_le_pci_dma(PCI_DEVICE(cq->ctrl), cq->db_addr, &cq->head, 1480 MEMTXATTRS_UNSPECIFIED); 1481 1482 trace_pci_nvme_update_cq_head(cq->cqid, cq->head); 1483 } 1484 1485 static void nvme_post_cqes(void *opaque) 1486 { 1487 NvmeCQueue *cq = opaque; 1488 NvmeCtrl *n = cq->ctrl; 1489 NvmeRequest *req, *next; 1490 bool pending = cq->head != cq->tail; 1491 int ret; 1492 1493 QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { 1494 NvmeSQueue *sq; 1495 hwaddr addr; 1496 1497 if (n->dbbuf_enabled) { 1498 nvme_update_cq_eventidx(cq); 1499 nvme_update_cq_head(cq); 1500 } 1501 1502 if (nvme_cq_full(cq)) { 1503 break; 1504 } 1505 1506 sq = req->sq; 1507 req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase); 1508 req->cqe.sq_id = cpu_to_le16(sq->sqid); 1509 req->cqe.sq_head = cpu_to_le16(sq->head); 1510 addr = cq->dma_addr + cq->tail * n->cqe_size; 1511 ret = pci_dma_write(PCI_DEVICE(n), addr, (void *)&req->cqe, 1512 sizeof(req->cqe)); 1513 if (ret) { 1514 trace_pci_nvme_err_addr_write(addr); 1515 trace_pci_nvme_err_cfs(); 1516 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); 1517 break; 1518 } 1519 QTAILQ_REMOVE(&cq->req_list, req, entry); 1520 nvme_inc_cq_tail(cq); 1521 nvme_sg_unmap(&req->sg); 1522 QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); 1523 } 1524 if (cq->tail != cq->head) { 1525 if (cq->irq_enabled && !pending) { 1526 n->cq_pending++; 1527 } 1528 1529 nvme_irq_assert(n, cq); 1530 } 1531 } 1532 1533 static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req) 1534 { 1535 assert(cq->cqid == req->sq->cqid); 1536 trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid, 1537 le32_to_cpu(req->cqe.result), 1538 le32_to_cpu(req->cqe.dw1), 1539 req->status); 1540 1541 if (req->status) { 1542 trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns), 1543 req->status, req->cmd.opcode); 1544 } 1545 1546 QTAILQ_REMOVE(&req->sq->out_req_list, req, entry); 1547 QTAILQ_INSERT_TAIL(&cq->req_list, req, entry); 1548 1549 qemu_bh_schedule(cq->bh); 1550 } 1551 1552 static void nvme_process_aers(void *opaque) 1553 { 1554 NvmeCtrl *n = opaque; 1555 NvmeAsyncEvent *event, *next; 1556 1557 
trace_pci_nvme_process_aers(n->aer_queued); 1558 1559 QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) { 1560 NvmeRequest *req; 1561 NvmeAerResult *result; 1562 1563 /* can't post cqe if there is nothing to complete */ 1564 if (!n->outstanding_aers) { 1565 trace_pci_nvme_no_outstanding_aers(); 1566 break; 1567 } 1568 1569 /* ignore if masked (cqe posted, but event not cleared) */ 1570 if (n->aer_mask & (1 << event->result.event_type)) { 1571 trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask); 1572 continue; 1573 } 1574 1575 QTAILQ_REMOVE(&n->aer_queue, event, entry); 1576 n->aer_queued--; 1577 1578 n->aer_mask |= 1 << event->result.event_type; 1579 n->outstanding_aers--; 1580 1581 req = n->aer_reqs[n->outstanding_aers]; 1582 1583 result = (NvmeAerResult *) &req->cqe.result; 1584 result->event_type = event->result.event_type; 1585 result->event_info = event->result.event_info; 1586 result->log_page = event->result.log_page; 1587 g_free(event); 1588 1589 trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info, 1590 result->log_page); 1591 1592 nvme_enqueue_req_completion(&n->admin_cq, req); 1593 } 1594 } 1595 1596 static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type, 1597 uint8_t event_info, uint8_t log_page) 1598 { 1599 NvmeAsyncEvent *event; 1600 1601 trace_pci_nvme_enqueue_event(event_type, event_info, log_page); 1602 1603 if (n->aer_queued == n->params.aer_max_queued) { 1604 trace_pci_nvme_enqueue_event_noqueue(n->aer_queued); 1605 return; 1606 } 1607 1608 event = g_new(NvmeAsyncEvent, 1); 1609 event->result = (NvmeAerResult) { 1610 .event_type = event_type, 1611 .event_info = event_info, 1612 .log_page = log_page, 1613 }; 1614 1615 QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry); 1616 n->aer_queued++; 1617 1618 nvme_process_aers(n); 1619 } 1620 1621 static void nvme_smart_event(NvmeCtrl *n, uint8_t event) 1622 { 1623 uint8_t aer_info; 1624 1625 /* Ref SPEC <Asynchronous Event Information 0x2013 SMART / Health Status> */ 1626 if (!(NVME_AEC_SMART(n->features.async_config) & event)) { 1627 return; 1628 } 1629 1630 switch (event) { 1631 case NVME_SMART_SPARE: 1632 aer_info = NVME_AER_INFO_SMART_SPARE_THRESH; 1633 break; 1634 case NVME_SMART_TEMPERATURE: 1635 aer_info = NVME_AER_INFO_SMART_TEMP_THRESH; 1636 break; 1637 case NVME_SMART_RELIABILITY: 1638 case NVME_SMART_MEDIA_READ_ONLY: 1639 case NVME_SMART_FAILED_VOLATILE_MEDIA: 1640 case NVME_SMART_PMR_UNRELIABLE: 1641 aer_info = NVME_AER_INFO_SMART_RELIABILITY; 1642 break; 1643 default: 1644 return; 1645 } 1646 1647 nvme_enqueue_event(n, NVME_AER_TYPE_SMART, aer_info, NVME_LOG_SMART_INFO); 1648 } 1649 1650 static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type) 1651 { 1652 n->aer_mask &= ~(1 << event_type); 1653 if (!QTAILQ_EMPTY(&n->aer_queue)) { 1654 nvme_process_aers(n); 1655 } 1656 } 1657 1658 static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len) 1659 { 1660 uint8_t mdts = n->params.mdts; 1661 1662 if (mdts && len > n->page_size << mdts) { 1663 trace_pci_nvme_err_mdts(len); 1664 return NVME_INVALID_FIELD | NVME_DNR; 1665 } 1666 1667 return NVME_SUCCESS; 1668 } 1669 1670 static inline uint16_t nvme_check_bounds(NvmeNamespace *ns, uint64_t slba, 1671 uint32_t nlb) 1672 { 1673 uint64_t nsze = le64_to_cpu(ns->id_ns.nsze); 1674 1675 if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) { 1676 trace_pci_nvme_err_invalid_lba_range(slba, nlb, nsze); 1677 return NVME_LBA_RANGE | NVME_DNR; 1678 } 1679 1680 return NVME_SUCCESS; 1681 } 1682 1683 static int 
nvme_block_status_all(NvmeNamespace *ns, uint64_t slba, 1684 uint32_t nlb, int flags) 1685 { 1686 BlockDriverState *bs = blk_bs(ns->blkconf.blk); 1687 1688 int64_t pnum = 0, bytes = nvme_l2b(ns, nlb); 1689 int64_t offset = nvme_l2b(ns, slba); 1690 int ret; 1691 1692 /* 1693 * `pnum` holds the number of bytes after offset that shares the same 1694 * allocation status as the byte at offset. If `pnum` is different from 1695 * `bytes`, we should check the allocation status of the next range and 1696 * continue this until all bytes have been checked. 1697 */ 1698 do { 1699 bytes -= pnum; 1700 1701 ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL); 1702 if (ret < 0) { 1703 return ret; 1704 } 1705 1706 1707 trace_pci_nvme_block_status(offset, bytes, pnum, ret, 1708 !!(ret & BDRV_BLOCK_ZERO)); 1709 1710 if (!(ret & flags)) { 1711 return 1; 1712 } 1713 1714 offset += pnum; 1715 } while (pnum != bytes); 1716 1717 return 0; 1718 } 1719 1720 static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba, 1721 uint32_t nlb) 1722 { 1723 int ret; 1724 Error *err = NULL; 1725 1726 ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_DATA); 1727 if (ret) { 1728 if (ret < 0) { 1729 error_setg_errno(&err, -ret, "unable to get block status"); 1730 error_report_err(err); 1731 1732 return NVME_INTERNAL_DEV_ERROR; 1733 } 1734 1735 return NVME_DULB; 1736 } 1737 1738 return NVME_SUCCESS; 1739 } 1740 1741 static void nvme_aio_err(NvmeRequest *req, int ret) 1742 { 1743 uint16_t status = NVME_SUCCESS; 1744 Error *local_err = NULL; 1745 1746 switch (req->cmd.opcode) { 1747 case NVME_CMD_READ: 1748 status = NVME_UNRECOVERED_READ; 1749 break; 1750 case NVME_CMD_FLUSH: 1751 case NVME_CMD_WRITE: 1752 case NVME_CMD_WRITE_ZEROES: 1753 case NVME_CMD_ZONE_APPEND: 1754 case NVME_CMD_COPY: 1755 status = NVME_WRITE_FAULT; 1756 break; 1757 default: 1758 status = NVME_INTERNAL_DEV_ERROR; 1759 break; 1760 } 1761 1762 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status); 1763 1764 error_setg_errno(&local_err, -ret, "aio failed"); 1765 error_report_err(local_err); 1766 1767 /* 1768 * Set the command status code to the first encountered error but allow a 1769 * subsequent Internal Device Error to trump it. 1770 */ 1771 if (req->status && status != NVME_INTERNAL_DEV_ERROR) { 1772 return; 1773 } 1774 1775 req->status = status; 1776 } 1777 1778 static inline uint32_t nvme_zone_idx(NvmeNamespace *ns, uint64_t slba) 1779 { 1780 return ns->zone_size_log2 > 0 ? 
slba >> ns->zone_size_log2 : 1781 slba / ns->zone_size; 1782 } 1783 1784 static inline NvmeZone *nvme_get_zone_by_slba(NvmeNamespace *ns, uint64_t slba) 1785 { 1786 uint32_t zone_idx = nvme_zone_idx(ns, slba); 1787 1788 if (zone_idx >= ns->num_zones) { 1789 return NULL; 1790 } 1791 1792 return &ns->zone_array[zone_idx]; 1793 } 1794 1795 static uint16_t nvme_check_zone_state_for_write(NvmeZone *zone) 1796 { 1797 uint64_t zslba = zone->d.zslba; 1798 1799 switch (nvme_get_zone_state(zone)) { 1800 case NVME_ZONE_STATE_EMPTY: 1801 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1802 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1803 case NVME_ZONE_STATE_CLOSED: 1804 return NVME_SUCCESS; 1805 case NVME_ZONE_STATE_FULL: 1806 trace_pci_nvme_err_zone_is_full(zslba); 1807 return NVME_ZONE_FULL; 1808 case NVME_ZONE_STATE_OFFLINE: 1809 trace_pci_nvme_err_zone_is_offline(zslba); 1810 return NVME_ZONE_OFFLINE; 1811 case NVME_ZONE_STATE_READ_ONLY: 1812 trace_pci_nvme_err_zone_is_read_only(zslba); 1813 return NVME_ZONE_READ_ONLY; 1814 default: 1815 assert(false); 1816 } 1817 1818 return NVME_INTERNAL_DEV_ERROR; 1819 } 1820 1821 static uint16_t nvme_check_zone_write(NvmeNamespace *ns, NvmeZone *zone, 1822 uint64_t slba, uint32_t nlb) 1823 { 1824 uint64_t zcap = nvme_zone_wr_boundary(zone); 1825 uint16_t status; 1826 1827 status = nvme_check_zone_state_for_write(zone); 1828 if (status) { 1829 return status; 1830 } 1831 1832 if (zone->d.za & NVME_ZA_ZRWA_VALID) { 1833 uint64_t ezrwa = zone->w_ptr + 2 * ns->zns.zrwas; 1834 1835 if (slba < zone->w_ptr || slba + nlb > ezrwa) { 1836 trace_pci_nvme_err_zone_invalid_write(slba, zone->w_ptr); 1837 return NVME_ZONE_INVALID_WRITE; 1838 } 1839 } else { 1840 if (unlikely(slba != zone->w_ptr)) { 1841 trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, 1842 zone->w_ptr); 1843 return NVME_ZONE_INVALID_WRITE; 1844 } 1845 } 1846 1847 if (unlikely((slba + nlb) > zcap)) { 1848 trace_pci_nvme_err_zone_boundary(slba, nlb, zcap); 1849 return NVME_ZONE_BOUNDARY_ERROR; 1850 } 1851 1852 return NVME_SUCCESS; 1853 } 1854 1855 static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone) 1856 { 1857 switch (nvme_get_zone_state(zone)) { 1858 case NVME_ZONE_STATE_EMPTY: 1859 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1860 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1861 case NVME_ZONE_STATE_FULL: 1862 case NVME_ZONE_STATE_CLOSED: 1863 case NVME_ZONE_STATE_READ_ONLY: 1864 return NVME_SUCCESS; 1865 case NVME_ZONE_STATE_OFFLINE: 1866 trace_pci_nvme_err_zone_is_offline(zone->d.zslba); 1867 return NVME_ZONE_OFFLINE; 1868 default: 1869 assert(false); 1870 } 1871 1872 return NVME_INTERNAL_DEV_ERROR; 1873 } 1874 1875 static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba, 1876 uint32_t nlb) 1877 { 1878 NvmeZone *zone; 1879 uint64_t bndry, end; 1880 uint16_t status; 1881 1882 zone = nvme_get_zone_by_slba(ns, slba); 1883 assert(zone); 1884 1885 bndry = nvme_zone_rd_boundary(ns, zone); 1886 end = slba + nlb; 1887 1888 status = nvme_check_zone_state_for_read(zone); 1889 if (status) { 1890 ; 1891 } else if (unlikely(end > bndry)) { 1892 if (!ns->params.cross_zone_read) { 1893 status = NVME_ZONE_BOUNDARY_ERROR; 1894 } else { 1895 /* 1896 * Read across zone boundary - check that all subsequent 1897 * zones that are being read have an appropriate state. 
1898 */ 1899 do { 1900 zone++; 1901 status = nvme_check_zone_state_for_read(zone); 1902 if (status) { 1903 break; 1904 } 1905 } while (end > nvme_zone_rd_boundary(ns, zone)); 1906 } 1907 } 1908 1909 return status; 1910 } 1911 1912 static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone) 1913 { 1914 switch (nvme_get_zone_state(zone)) { 1915 case NVME_ZONE_STATE_FULL: 1916 return NVME_SUCCESS; 1917 1918 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1919 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1920 nvme_aor_dec_open(ns); 1921 /* fallthrough */ 1922 case NVME_ZONE_STATE_CLOSED: 1923 nvme_aor_dec_active(ns); 1924 1925 if (zone->d.za & NVME_ZA_ZRWA_VALID) { 1926 zone->d.za &= ~NVME_ZA_ZRWA_VALID; 1927 if (ns->params.numzrwa) { 1928 ns->zns.numzrwa++; 1929 } 1930 } 1931 1932 /* fallthrough */ 1933 case NVME_ZONE_STATE_EMPTY: 1934 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL); 1935 return NVME_SUCCESS; 1936 1937 default: 1938 return NVME_ZONE_INVAL_TRANSITION; 1939 } 1940 } 1941 1942 static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone) 1943 { 1944 switch (nvme_get_zone_state(zone)) { 1945 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1946 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1947 nvme_aor_dec_open(ns); 1948 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED); 1949 /* fall through */ 1950 case NVME_ZONE_STATE_CLOSED: 1951 return NVME_SUCCESS; 1952 1953 default: 1954 return NVME_ZONE_INVAL_TRANSITION; 1955 } 1956 } 1957 1958 static uint16_t nvme_zrm_reset(NvmeNamespace *ns, NvmeZone *zone) 1959 { 1960 switch (nvme_get_zone_state(zone)) { 1961 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 1962 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 1963 nvme_aor_dec_open(ns); 1964 /* fallthrough */ 1965 case NVME_ZONE_STATE_CLOSED: 1966 nvme_aor_dec_active(ns); 1967 1968 if (zone->d.za & NVME_ZA_ZRWA_VALID) { 1969 if (ns->params.numzrwa) { 1970 ns->zns.numzrwa++; 1971 } 1972 } 1973 1974 /* fallthrough */ 1975 case NVME_ZONE_STATE_FULL: 1976 zone->w_ptr = zone->d.zslba; 1977 zone->d.wp = zone->w_ptr; 1978 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EMPTY); 1979 /* fallthrough */ 1980 case NVME_ZONE_STATE_EMPTY: 1981 return NVME_SUCCESS; 1982 1983 default: 1984 return NVME_ZONE_INVAL_TRANSITION; 1985 } 1986 } 1987 1988 static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns) 1989 { 1990 NvmeZone *zone; 1991 1992 if (ns->params.max_open_zones && 1993 ns->nr_open_zones == ns->params.max_open_zones) { 1994 zone = QTAILQ_FIRST(&ns->imp_open_zones); 1995 if (zone) { 1996 /* 1997 * Automatically close this implicitly open zone. 1998 */ 1999 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); 2000 nvme_zrm_close(ns, zone); 2001 } 2002 } 2003 } 2004 2005 enum { 2006 NVME_ZRM_AUTO = 1 << 0, 2007 NVME_ZRM_ZRWA = 1 << 1, 2008 }; 2009 2010 static uint16_t nvme_zrm_open_flags(NvmeCtrl *n, NvmeNamespace *ns, 2011 NvmeZone *zone, int flags) 2012 { 2013 int act = 0; 2014 uint16_t status; 2015 2016 switch (nvme_get_zone_state(zone)) { 2017 case NVME_ZONE_STATE_EMPTY: 2018 act = 1; 2019 2020 /* fallthrough */ 2021 2022 case NVME_ZONE_STATE_CLOSED: 2023 if (n->params.auto_transition_zones) { 2024 nvme_zrm_auto_transition_zone(ns); 2025 } 2026 status = nvme_zns_check_resources(ns, act, 1, 2027 (flags & NVME_ZRM_ZRWA) ? 
1 : 0); 2028 if (status) { 2029 return status; 2030 } 2031 2032 if (act) { 2033 nvme_aor_inc_active(ns); 2034 } 2035 2036 nvme_aor_inc_open(ns); 2037 2038 if (flags & NVME_ZRM_AUTO) { 2039 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN); 2040 return NVME_SUCCESS; 2041 } 2042 2043 /* fallthrough */ 2044 2045 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 2046 if (flags & NVME_ZRM_AUTO) { 2047 return NVME_SUCCESS; 2048 } 2049 2050 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN); 2051 2052 /* fallthrough */ 2053 2054 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 2055 if (flags & NVME_ZRM_ZRWA) { 2056 ns->zns.numzrwa--; 2057 2058 zone->d.za |= NVME_ZA_ZRWA_VALID; 2059 } 2060 2061 return NVME_SUCCESS; 2062 2063 default: 2064 return NVME_ZONE_INVAL_TRANSITION; 2065 } 2066 } 2067 2068 static inline uint16_t nvme_zrm_auto(NvmeCtrl *n, NvmeNamespace *ns, 2069 NvmeZone *zone) 2070 { 2071 return nvme_zrm_open_flags(n, ns, zone, NVME_ZRM_AUTO); 2072 } 2073 2074 static void nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone, 2075 uint32_t nlb) 2076 { 2077 zone->d.wp += nlb; 2078 2079 if (zone->d.wp == nvme_zone_wr_boundary(zone)) { 2080 nvme_zrm_finish(ns, zone); 2081 } 2082 } 2083 2084 static void nvme_zoned_zrwa_implicit_flush(NvmeNamespace *ns, NvmeZone *zone, 2085 uint32_t nlbc) 2086 { 2087 uint16_t nzrwafgs = DIV_ROUND_UP(nlbc, ns->zns.zrwafg); 2088 2089 nlbc = nzrwafgs * ns->zns.zrwafg; 2090 2091 trace_pci_nvme_zoned_zrwa_implicit_flush(zone->d.zslba, nlbc); 2092 2093 zone->w_ptr += nlbc; 2094 2095 nvme_advance_zone_wp(ns, zone, nlbc); 2096 } 2097 2098 static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req) 2099 { 2100 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2101 NvmeZone *zone; 2102 uint64_t slba; 2103 uint32_t nlb; 2104 2105 slba = le64_to_cpu(rw->slba); 2106 nlb = le16_to_cpu(rw->nlb) + 1; 2107 zone = nvme_get_zone_by_slba(ns, slba); 2108 assert(zone); 2109 2110 if (zone->d.za & NVME_ZA_ZRWA_VALID) { 2111 uint64_t ezrwa = zone->w_ptr + ns->zns.zrwas - 1; 2112 uint64_t elba = slba + nlb - 1; 2113 2114 if (elba > ezrwa) { 2115 nvme_zoned_zrwa_implicit_flush(ns, zone, elba - ezrwa); 2116 } 2117 2118 return; 2119 } 2120 2121 nvme_advance_zone_wp(ns, zone, nlb); 2122 } 2123 2124 static inline bool nvme_is_write(NvmeRequest *req) 2125 { 2126 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2127 2128 return rw->opcode == NVME_CMD_WRITE || 2129 rw->opcode == NVME_CMD_ZONE_APPEND || 2130 rw->opcode == NVME_CMD_WRITE_ZEROES; 2131 } 2132 2133 static AioContext *nvme_get_aio_context(BlockAIOCB *acb) 2134 { 2135 return qemu_get_aio_context(); 2136 } 2137 2138 static void nvme_misc_cb(void *opaque, int ret) 2139 { 2140 NvmeRequest *req = opaque; 2141 2142 trace_pci_nvme_misc_cb(nvme_cid(req)); 2143 2144 if (ret) { 2145 nvme_aio_err(req, ret); 2146 } 2147 2148 nvme_enqueue_req_completion(nvme_cq(req), req); 2149 } 2150 2151 void nvme_rw_complete_cb(void *opaque, int ret) 2152 { 2153 NvmeRequest *req = opaque; 2154 NvmeNamespace *ns = req->ns; 2155 BlockBackend *blk = ns->blkconf.blk; 2156 BlockAcctCookie *acct = &req->acct; 2157 BlockAcctStats *stats = blk_get_stats(blk); 2158 2159 trace_pci_nvme_rw_complete_cb(nvme_cid(req), blk_name(blk)); 2160 2161 if (ret) { 2162 block_acct_failed(stats, acct); 2163 nvme_aio_err(req, ret); 2164 } else { 2165 block_acct_done(stats, acct); 2166 } 2167 2168 if (ns->params.zoned && nvme_is_write(req)) { 2169 nvme_finalize_zoned_write(ns, req); 2170 } 2171 2172 nvme_enqueue_req_completion(nvme_cq(req), req); 2173 } 2174 2175 static void 
nvme_rw_cb(void *opaque, int ret) 2176 { 2177 NvmeRequest *req = opaque; 2178 NvmeNamespace *ns = req->ns; 2179 2180 BlockBackend *blk = ns->blkconf.blk; 2181 2182 trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk)); 2183 2184 if (ret) { 2185 goto out; 2186 } 2187 2188 if (ns->lbaf.ms) { 2189 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2190 uint64_t slba = le64_to_cpu(rw->slba); 2191 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; 2192 uint64_t offset = nvme_moff(ns, slba); 2193 2194 if (req->cmd.opcode == NVME_CMD_WRITE_ZEROES) { 2195 size_t mlen = nvme_m2b(ns, nlb); 2196 2197 req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen, 2198 BDRV_REQ_MAY_UNMAP, 2199 nvme_rw_complete_cb, req); 2200 return; 2201 } 2202 2203 if (nvme_ns_ext(ns) || req->cmd.mptr) { 2204 uint16_t status; 2205 2206 nvme_sg_unmap(&req->sg); 2207 status = nvme_map_mdata(nvme_ctrl(req), nlb, req); 2208 if (status) { 2209 ret = -EFAULT; 2210 goto out; 2211 } 2212 2213 if (req->cmd.opcode == NVME_CMD_READ) { 2214 return nvme_blk_read(blk, offset, 1, nvme_rw_complete_cb, req); 2215 } 2216 2217 return nvme_blk_write(blk, offset, 1, nvme_rw_complete_cb, req); 2218 } 2219 } 2220 2221 out: 2222 nvme_rw_complete_cb(req, ret); 2223 } 2224 2225 static void nvme_verify_cb(void *opaque, int ret) 2226 { 2227 NvmeBounceContext *ctx = opaque; 2228 NvmeRequest *req = ctx->req; 2229 NvmeNamespace *ns = req->ns; 2230 BlockBackend *blk = ns->blkconf.blk; 2231 BlockAcctCookie *acct = &req->acct; 2232 BlockAcctStats *stats = blk_get_stats(blk); 2233 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2234 uint64_t slba = le64_to_cpu(rw->slba); 2235 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 2236 uint16_t apptag = le16_to_cpu(rw->apptag); 2237 uint16_t appmask = le16_to_cpu(rw->appmask); 2238 uint64_t reftag = le32_to_cpu(rw->reftag); 2239 uint64_t cdw3 = le32_to_cpu(rw->cdw3); 2240 uint16_t status; 2241 2242 reftag |= cdw3 << 32; 2243 2244 trace_pci_nvme_verify_cb(nvme_cid(req), prinfo, apptag, appmask, reftag); 2245 2246 if (ret) { 2247 block_acct_failed(stats, acct); 2248 nvme_aio_err(req, ret); 2249 goto out; 2250 } 2251 2252 block_acct_done(stats, acct); 2253 2254 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 2255 status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, 2256 ctx->mdata.iov.size, slba); 2257 if (status) { 2258 req->status = status; 2259 goto out; 2260 } 2261 2262 req->status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, 2263 ctx->mdata.bounce, ctx->mdata.iov.size, 2264 prinfo, slba, apptag, appmask, &reftag); 2265 } 2266 2267 out: 2268 qemu_iovec_destroy(&ctx->data.iov); 2269 g_free(ctx->data.bounce); 2270 2271 qemu_iovec_destroy(&ctx->mdata.iov); 2272 g_free(ctx->mdata.bounce); 2273 2274 g_free(ctx); 2275 2276 nvme_enqueue_req_completion(nvme_cq(req), req); 2277 } 2278 2279 2280 static void nvme_verify_mdata_in_cb(void *opaque, int ret) 2281 { 2282 NvmeBounceContext *ctx = opaque; 2283 NvmeRequest *req = ctx->req; 2284 NvmeNamespace *ns = req->ns; 2285 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2286 uint64_t slba = le64_to_cpu(rw->slba); 2287 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; 2288 size_t mlen = nvme_m2b(ns, nlb); 2289 uint64_t offset = nvme_moff(ns, slba); 2290 BlockBackend *blk = ns->blkconf.blk; 2291 2292 trace_pci_nvme_verify_mdata_in_cb(nvme_cid(req), blk_name(blk)); 2293 2294 if (ret) { 2295 goto out; 2296 } 2297 2298 ctx->mdata.bounce = g_malloc(mlen); 2299 2300 qemu_iovec_reset(&ctx->mdata.iov); 2301 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); 2302 2303 req->aiocb = blk_aio_preadv(blk, 
offset, &ctx->mdata.iov, 0, 2304 nvme_verify_cb, ctx); 2305 return; 2306 2307 out: 2308 nvme_verify_cb(ctx, ret); 2309 } 2310 2311 struct nvme_compare_ctx { 2312 struct { 2313 QEMUIOVector iov; 2314 uint8_t *bounce; 2315 } data; 2316 2317 struct { 2318 QEMUIOVector iov; 2319 uint8_t *bounce; 2320 } mdata; 2321 }; 2322 2323 static void nvme_compare_mdata_cb(void *opaque, int ret) 2324 { 2325 NvmeRequest *req = opaque; 2326 NvmeNamespace *ns = req->ns; 2327 NvmeCtrl *n = nvme_ctrl(req); 2328 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2329 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 2330 uint16_t apptag = le16_to_cpu(rw->apptag); 2331 uint16_t appmask = le16_to_cpu(rw->appmask); 2332 uint64_t reftag = le32_to_cpu(rw->reftag); 2333 uint64_t cdw3 = le32_to_cpu(rw->cdw3); 2334 struct nvme_compare_ctx *ctx = req->opaque; 2335 g_autofree uint8_t *buf = NULL; 2336 BlockBackend *blk = ns->blkconf.blk; 2337 BlockAcctCookie *acct = &req->acct; 2338 BlockAcctStats *stats = blk_get_stats(blk); 2339 uint16_t status = NVME_SUCCESS; 2340 2341 reftag |= cdw3 << 32; 2342 2343 trace_pci_nvme_compare_mdata_cb(nvme_cid(req)); 2344 2345 if (ret) { 2346 block_acct_failed(stats, acct); 2347 nvme_aio_err(req, ret); 2348 goto out; 2349 } 2350 2351 buf = g_malloc(ctx->mdata.iov.size); 2352 2353 status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size, 2354 NVME_TX_DIRECTION_TO_DEVICE, req); 2355 if (status) { 2356 req->status = status; 2357 goto out; 2358 } 2359 2360 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 2361 uint64_t slba = le64_to_cpu(rw->slba); 2362 uint8_t *bufp; 2363 uint8_t *mbufp = ctx->mdata.bounce; 2364 uint8_t *end = mbufp + ctx->mdata.iov.size; 2365 int16_t pil = 0; 2366 2367 status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, 2368 ctx->mdata.bounce, ctx->mdata.iov.size, prinfo, 2369 slba, apptag, appmask, &reftag); 2370 if (status) { 2371 req->status = status; 2372 goto out; 2373 } 2374 2375 /* 2376 * When formatted with protection information, do not compare the DIF 2377 * tuple. 
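         * The protection information itself was already checked by the
         * call to nvme_dif_check() above.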
2378 */ 2379 if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { 2380 pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); 2381 } 2382 2383 for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { 2384 if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) { 2385 req->status = NVME_CMP_FAILURE | NVME_DNR; 2386 goto out; 2387 } 2388 } 2389 2390 goto out; 2391 } 2392 2393 if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { 2394 req->status = NVME_CMP_FAILURE | NVME_DNR; 2395 goto out; 2396 } 2397 2398 block_acct_done(stats, acct); 2399 2400 out: 2401 qemu_iovec_destroy(&ctx->data.iov); 2402 g_free(ctx->data.bounce); 2403 2404 qemu_iovec_destroy(&ctx->mdata.iov); 2405 g_free(ctx->mdata.bounce); 2406 2407 g_free(ctx); 2408 2409 nvme_enqueue_req_completion(nvme_cq(req), req); 2410 } 2411 2412 static void nvme_compare_data_cb(void *opaque, int ret) 2413 { 2414 NvmeRequest *req = opaque; 2415 NvmeCtrl *n = nvme_ctrl(req); 2416 NvmeNamespace *ns = req->ns; 2417 BlockBackend *blk = ns->blkconf.blk; 2418 BlockAcctCookie *acct = &req->acct; 2419 BlockAcctStats *stats = blk_get_stats(blk); 2420 2421 struct nvme_compare_ctx *ctx = req->opaque; 2422 g_autofree uint8_t *buf = NULL; 2423 uint16_t status; 2424 2425 trace_pci_nvme_compare_data_cb(nvme_cid(req)); 2426 2427 if (ret) { 2428 block_acct_failed(stats, acct); 2429 nvme_aio_err(req, ret); 2430 goto out; 2431 } 2432 2433 buf = g_malloc(ctx->data.iov.size); 2434 2435 status = nvme_bounce_data(n, buf, ctx->data.iov.size, 2436 NVME_TX_DIRECTION_TO_DEVICE, req); 2437 if (status) { 2438 req->status = status; 2439 goto out; 2440 } 2441 2442 if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) { 2443 req->status = NVME_CMP_FAILURE | NVME_DNR; 2444 goto out; 2445 } 2446 2447 if (ns->lbaf.ms) { 2448 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2449 uint64_t slba = le64_to_cpu(rw->slba); 2450 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; 2451 size_t mlen = nvme_m2b(ns, nlb); 2452 uint64_t offset = nvme_moff(ns, slba); 2453 2454 ctx->mdata.bounce = g_malloc(mlen); 2455 2456 qemu_iovec_init(&ctx->mdata.iov, 1); 2457 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); 2458 2459 req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, 2460 nvme_compare_mdata_cb, req); 2461 return; 2462 } 2463 2464 block_acct_done(stats, acct); 2465 2466 out: 2467 qemu_iovec_destroy(&ctx->data.iov); 2468 g_free(ctx->data.bounce); 2469 g_free(ctx); 2470 2471 nvme_enqueue_req_completion(nvme_cq(req), req); 2472 } 2473 2474 typedef struct NvmeDSMAIOCB { 2475 BlockAIOCB common; 2476 BlockAIOCB *aiocb; 2477 NvmeRequest *req; 2478 int ret; 2479 2480 NvmeDsmRange *range; 2481 unsigned int nr; 2482 unsigned int idx; 2483 } NvmeDSMAIOCB; 2484 2485 static void nvme_dsm_cancel(BlockAIOCB *aiocb) 2486 { 2487 NvmeDSMAIOCB *iocb = container_of(aiocb, NvmeDSMAIOCB, common); 2488 2489 /* break nvme_dsm_cb loop */ 2490 iocb->idx = iocb->nr; 2491 iocb->ret = -ECANCELED; 2492 2493 if (iocb->aiocb) { 2494 blk_aio_cancel_async(iocb->aiocb); 2495 iocb->aiocb = NULL; 2496 } else { 2497 /* 2498 * We only reach this if nvme_dsm_cancel() has already been called or 2499 * the command ran to completion. 
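         * In both cases iocb->idx has already reached iocb->nr, so there
         * is no outstanding discard left to cancel.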
2500 */ 2501 assert(iocb->idx == iocb->nr); 2502 } 2503 } 2504 2505 static const AIOCBInfo nvme_dsm_aiocb_info = { 2506 .aiocb_size = sizeof(NvmeDSMAIOCB), 2507 .cancel_async = nvme_dsm_cancel, 2508 }; 2509 2510 static void nvme_dsm_cb(void *opaque, int ret); 2511 2512 static void nvme_dsm_md_cb(void *opaque, int ret) 2513 { 2514 NvmeDSMAIOCB *iocb = opaque; 2515 NvmeRequest *req = iocb->req; 2516 NvmeNamespace *ns = req->ns; 2517 NvmeDsmRange *range; 2518 uint64_t slba; 2519 uint32_t nlb; 2520 2521 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { 2522 goto done; 2523 } 2524 2525 range = &iocb->range[iocb->idx - 1]; 2526 slba = le64_to_cpu(range->slba); 2527 nlb = le32_to_cpu(range->nlb); 2528 2529 /* 2530 * Check that all block were discarded (zeroed); otherwise we do not zero 2531 * the metadata. 2532 */ 2533 2534 ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_ZERO); 2535 if (ret) { 2536 if (ret < 0) { 2537 goto done; 2538 } 2539 2540 nvme_dsm_cb(iocb, 0); 2541 return; 2542 } 2543 2544 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_moff(ns, slba), 2545 nvme_m2b(ns, nlb), BDRV_REQ_MAY_UNMAP, 2546 nvme_dsm_cb, iocb); 2547 return; 2548 2549 done: 2550 nvme_dsm_cb(iocb, ret); 2551 } 2552 2553 static void nvme_dsm_cb(void *opaque, int ret) 2554 { 2555 NvmeDSMAIOCB *iocb = opaque; 2556 NvmeRequest *req = iocb->req; 2557 NvmeCtrl *n = nvme_ctrl(req); 2558 NvmeNamespace *ns = req->ns; 2559 NvmeDsmRange *range; 2560 uint64_t slba; 2561 uint32_t nlb; 2562 2563 if (iocb->ret < 0) { 2564 goto done; 2565 } else if (ret < 0) { 2566 iocb->ret = ret; 2567 goto done; 2568 } 2569 2570 next: 2571 if (iocb->idx == iocb->nr) { 2572 goto done; 2573 } 2574 2575 range = &iocb->range[iocb->idx++]; 2576 slba = le64_to_cpu(range->slba); 2577 nlb = le32_to_cpu(range->nlb); 2578 2579 trace_pci_nvme_dsm_deallocate(slba, nlb); 2580 2581 if (nlb > n->dmrsl) { 2582 trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl); 2583 goto next; 2584 } 2585 2586 if (nvme_check_bounds(ns, slba, nlb)) { 2587 trace_pci_nvme_err_invalid_lba_range(slba, nlb, 2588 ns->id_ns.nsze); 2589 goto next; 2590 } 2591 2592 iocb->aiocb = blk_aio_pdiscard(ns->blkconf.blk, nvme_l2b(ns, slba), 2593 nvme_l2b(ns, nlb), 2594 nvme_dsm_md_cb, iocb); 2595 return; 2596 2597 done: 2598 iocb->aiocb = NULL; 2599 iocb->common.cb(iocb->common.opaque, iocb->ret); 2600 qemu_aio_unref(iocb); 2601 } 2602 2603 static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) 2604 { 2605 NvmeNamespace *ns = req->ns; 2606 NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd; 2607 uint32_t attr = le32_to_cpu(dsm->attributes); 2608 uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1; 2609 uint16_t status = NVME_SUCCESS; 2610 2611 trace_pci_nvme_dsm(nr, attr); 2612 2613 if (attr & NVME_DSMGMT_AD) { 2614 NvmeDSMAIOCB *iocb = blk_aio_get(&nvme_dsm_aiocb_info, ns->blkconf.blk, 2615 nvme_misc_cb, req); 2616 2617 iocb->req = req; 2618 iocb->ret = 0; 2619 iocb->range = g_new(NvmeDsmRange, nr); 2620 iocb->nr = nr; 2621 iocb->idx = 0; 2622 2623 status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr, 2624 req); 2625 if (status) { 2626 g_free(iocb->range); 2627 qemu_aio_unref(iocb); 2628 2629 return status; 2630 } 2631 2632 req->aiocb = &iocb->common; 2633 nvme_dsm_cb(iocb, 0); 2634 2635 return NVME_NO_COMPLETE; 2636 } 2637 2638 return status; 2639 } 2640 2641 static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req) 2642 { 2643 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 2644 NvmeNamespace *ns = req->ns; 2645 BlockBackend *blk = ns->blkconf.blk; 2646 uint64_t slba = 
le64_to_cpu(rw->slba); 2647 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; 2648 size_t len = nvme_l2b(ns, nlb); 2649 int64_t offset = nvme_l2b(ns, slba); 2650 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 2651 uint32_t reftag = le32_to_cpu(rw->reftag); 2652 NvmeBounceContext *ctx = NULL; 2653 uint16_t status; 2654 2655 trace_pci_nvme_verify(nvme_cid(req), nvme_nsid(ns), slba, nlb); 2656 2657 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 2658 status = nvme_check_prinfo(ns, prinfo, slba, reftag); 2659 if (status) { 2660 return status; 2661 } 2662 2663 if (prinfo & NVME_PRINFO_PRACT) { 2664 return NVME_INVALID_PROT_INFO | NVME_DNR; 2665 } 2666 } 2667 2668 if (len > n->page_size << n->params.vsl) { 2669 return NVME_INVALID_FIELD | NVME_DNR; 2670 } 2671 2672 status = nvme_check_bounds(ns, slba, nlb); 2673 if (status) { 2674 return status; 2675 } 2676 2677 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { 2678 status = nvme_check_dulbe(ns, slba, nlb); 2679 if (status) { 2680 return status; 2681 } 2682 } 2683 2684 ctx = g_new0(NvmeBounceContext, 1); 2685 ctx->req = req; 2686 2687 ctx->data.bounce = g_malloc(len); 2688 2689 qemu_iovec_init(&ctx->data.iov, 1); 2690 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len); 2691 2692 block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, 2693 BLOCK_ACCT_READ); 2694 2695 req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0, 2696 nvme_verify_mdata_in_cb, ctx); 2697 return NVME_NO_COMPLETE; 2698 } 2699 2700 typedef struct NvmeCopyAIOCB { 2701 BlockAIOCB common; 2702 BlockAIOCB *aiocb; 2703 NvmeRequest *req; 2704 int ret; 2705 2706 void *ranges; 2707 unsigned int format; 2708 int nr; 2709 int idx; 2710 2711 uint8_t *bounce; 2712 QEMUIOVector iov; 2713 struct { 2714 BlockAcctCookie read; 2715 BlockAcctCookie write; 2716 } acct; 2717 2718 uint64_t reftag; 2719 uint64_t slba; 2720 2721 NvmeZone *zone; 2722 } NvmeCopyAIOCB; 2723 2724 static void nvme_copy_cancel(BlockAIOCB *aiocb) 2725 { 2726 NvmeCopyAIOCB *iocb = container_of(aiocb, NvmeCopyAIOCB, common); 2727 2728 iocb->ret = -ECANCELED; 2729 2730 if (iocb->aiocb) { 2731 blk_aio_cancel_async(iocb->aiocb); 2732 iocb->aiocb = NULL; 2733 } 2734 } 2735 2736 static const AIOCBInfo nvme_copy_aiocb_info = { 2737 .aiocb_size = sizeof(NvmeCopyAIOCB), 2738 .cancel_async = nvme_copy_cancel, 2739 }; 2740 2741 static void nvme_copy_done(NvmeCopyAIOCB *iocb) 2742 { 2743 NvmeRequest *req = iocb->req; 2744 NvmeNamespace *ns = req->ns; 2745 BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk); 2746 2747 if (iocb->idx != iocb->nr) { 2748 req->cqe.result = cpu_to_le32(iocb->idx); 2749 } 2750 2751 qemu_iovec_destroy(&iocb->iov); 2752 g_free(iocb->bounce); 2753 2754 if (iocb->ret < 0) { 2755 block_acct_failed(stats, &iocb->acct.read); 2756 block_acct_failed(stats, &iocb->acct.write); 2757 } else { 2758 block_acct_done(stats, &iocb->acct.read); 2759 block_acct_done(stats, &iocb->acct.write); 2760 } 2761 2762 iocb->common.cb(iocb->common.opaque, iocb->ret); 2763 qemu_aio_unref(iocb); 2764 } 2765 2766 static void nvme_do_copy(NvmeCopyAIOCB *iocb); 2767 2768 static void nvme_copy_source_range_parse_format0(void *ranges, int idx, 2769 uint64_t *slba, uint32_t *nlb, 2770 uint16_t *apptag, 2771 uint16_t *appmask, 2772 uint64_t *reftag) 2773 { 2774 NvmeCopySourceRangeFormat0 *_ranges = ranges; 2775 2776 if (slba) { 2777 *slba = le64_to_cpu(_ranges[idx].slba); 2778 } 2779 2780 if (nlb) { 2781 *nlb = le16_to_cpu(_ranges[idx].nlb) + 1; 2782 } 2783 2784 if (apptag) { 2785 *apptag = 
le16_to_cpu(_ranges[idx].apptag); 2786 } 2787 2788 if (appmask) { 2789 *appmask = le16_to_cpu(_ranges[idx].appmask); 2790 } 2791 2792 if (reftag) { 2793 *reftag = le32_to_cpu(_ranges[idx].reftag); 2794 } 2795 } 2796 2797 static void nvme_copy_source_range_parse_format1(void *ranges, int idx, 2798 uint64_t *slba, uint32_t *nlb, 2799 uint16_t *apptag, 2800 uint16_t *appmask, 2801 uint64_t *reftag) 2802 { 2803 NvmeCopySourceRangeFormat1 *_ranges = ranges; 2804 2805 if (slba) { 2806 *slba = le64_to_cpu(_ranges[idx].slba); 2807 } 2808 2809 if (nlb) { 2810 *nlb = le16_to_cpu(_ranges[idx].nlb) + 1; 2811 } 2812 2813 if (apptag) { 2814 *apptag = le16_to_cpu(_ranges[idx].apptag); 2815 } 2816 2817 if (appmask) { 2818 *appmask = le16_to_cpu(_ranges[idx].appmask); 2819 } 2820 2821 if (reftag) { 2822 *reftag = 0; 2823 2824 *reftag |= (uint64_t)_ranges[idx].sr[4] << 40; 2825 *reftag |= (uint64_t)_ranges[idx].sr[5] << 32; 2826 *reftag |= (uint64_t)_ranges[idx].sr[6] << 24; 2827 *reftag |= (uint64_t)_ranges[idx].sr[7] << 16; 2828 *reftag |= (uint64_t)_ranges[idx].sr[8] << 8; 2829 *reftag |= (uint64_t)_ranges[idx].sr[9]; 2830 } 2831 } 2832 2833 static void nvme_copy_source_range_parse(void *ranges, int idx, uint8_t format, 2834 uint64_t *slba, uint32_t *nlb, 2835 uint16_t *apptag, uint16_t *appmask, 2836 uint64_t *reftag) 2837 { 2838 switch (format) { 2839 case NVME_COPY_FORMAT_0: 2840 nvme_copy_source_range_parse_format0(ranges, idx, slba, nlb, apptag, 2841 appmask, reftag); 2842 break; 2843 2844 case NVME_COPY_FORMAT_1: 2845 nvme_copy_source_range_parse_format1(ranges, idx, slba, nlb, apptag, 2846 appmask, reftag); 2847 break; 2848 2849 default: 2850 abort(); 2851 } 2852 } 2853 2854 static inline uint16_t nvme_check_copy_mcl(NvmeNamespace *ns, 2855 NvmeCopyAIOCB *iocb, uint16_t nr) 2856 { 2857 uint32_t copy_len = 0; 2858 2859 for (int idx = 0; idx < nr; idx++) { 2860 uint32_t nlb; 2861 nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL, 2862 &nlb, NULL, NULL, NULL); 2863 copy_len += nlb + 1; 2864 } 2865 2866 if (copy_len > ns->id_ns.mcl) { 2867 return NVME_CMD_SIZE_LIMIT | NVME_DNR; 2868 } 2869 2870 return NVME_SUCCESS; 2871 } 2872 2873 static void nvme_copy_out_completed_cb(void *opaque, int ret) 2874 { 2875 NvmeCopyAIOCB *iocb = opaque; 2876 NvmeRequest *req = iocb->req; 2877 NvmeNamespace *ns = req->ns; 2878 uint32_t nlb; 2879 2880 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, 2881 &nlb, NULL, NULL, NULL); 2882 2883 if (ret < 0) { 2884 iocb->ret = ret; 2885 goto out; 2886 } else if (iocb->ret < 0) { 2887 goto out; 2888 } 2889 2890 if (ns->params.zoned) { 2891 nvme_advance_zone_wp(ns, iocb->zone, nlb); 2892 } 2893 2894 iocb->idx++; 2895 iocb->slba += nlb; 2896 out: 2897 nvme_do_copy(iocb); 2898 } 2899 2900 static void nvme_copy_out_cb(void *opaque, int ret) 2901 { 2902 NvmeCopyAIOCB *iocb = opaque; 2903 NvmeRequest *req = iocb->req; 2904 NvmeNamespace *ns = req->ns; 2905 uint32_t nlb; 2906 size_t mlen; 2907 uint8_t *mbounce; 2908 2909 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { 2910 goto out; 2911 } 2912 2913 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, 2914 &nlb, NULL, NULL, NULL); 2915 2916 mlen = nvme_m2b(ns, nlb); 2917 mbounce = iocb->bounce + nvme_l2b(ns, nlb); 2918 2919 qemu_iovec_reset(&iocb->iov); 2920 qemu_iovec_add(&iocb->iov, mbounce, mlen); 2921 2922 iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_moff(ns, iocb->slba), 2923 &iocb->iov, 0, nvme_copy_out_completed_cb, 2924 iocb); 2925 2926 return; 2927 2928 out: 
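    /*
     * Error, an earlier failure, or no separate metadata to write; hand
     * the current status straight to the completion callback.
     */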
2929 nvme_copy_out_completed_cb(iocb, ret); 2930 } 2931 2932 static void nvme_copy_in_completed_cb(void *opaque, int ret) 2933 { 2934 NvmeCopyAIOCB *iocb = opaque; 2935 NvmeRequest *req = iocb->req; 2936 NvmeNamespace *ns = req->ns; 2937 uint32_t nlb; 2938 uint64_t slba; 2939 uint16_t apptag, appmask; 2940 uint64_t reftag; 2941 size_t len; 2942 uint16_t status; 2943 2944 if (ret < 0) { 2945 iocb->ret = ret; 2946 goto out; 2947 } else if (iocb->ret < 0) { 2948 goto out; 2949 } 2950 2951 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, 2952 &nlb, &apptag, &appmask, &reftag); 2953 len = nvme_l2b(ns, nlb); 2954 2955 trace_pci_nvme_copy_out(iocb->slba, nlb); 2956 2957 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 2958 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; 2959 2960 uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); 2961 uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); 2962 2963 size_t mlen = nvme_m2b(ns, nlb); 2964 uint8_t *mbounce = iocb->bounce + nvme_l2b(ns, nlb); 2965 2966 status = nvme_dif_mangle_mdata(ns, mbounce, mlen, slba); 2967 if (status) { 2968 goto invalid; 2969 } 2970 status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, prinfor, 2971 slba, apptag, appmask, &reftag); 2972 if (status) { 2973 goto invalid; 2974 } 2975 2976 apptag = le16_to_cpu(copy->apptag); 2977 appmask = le16_to_cpu(copy->appmask); 2978 2979 if (prinfow & NVME_PRINFO_PRACT) { 2980 status = nvme_check_prinfo(ns, prinfow, iocb->slba, iocb->reftag); 2981 if (status) { 2982 goto invalid; 2983 } 2984 2985 nvme_dif_pract_generate_dif(ns, iocb->bounce, len, mbounce, mlen, 2986 apptag, &iocb->reftag); 2987 } else { 2988 status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, 2989 prinfow, iocb->slba, apptag, appmask, 2990 &iocb->reftag); 2991 if (status) { 2992 goto invalid; 2993 } 2994 } 2995 } 2996 2997 status = nvme_check_bounds(ns, iocb->slba, nlb); 2998 if (status) { 2999 goto invalid; 3000 } 3001 3002 if (ns->params.zoned) { 3003 status = nvme_check_zone_write(ns, iocb->zone, iocb->slba, nlb); 3004 if (status) { 3005 goto invalid; 3006 } 3007 3008 if (!(iocb->zone->d.za & NVME_ZA_ZRWA_VALID)) { 3009 iocb->zone->w_ptr += nlb; 3010 } 3011 } 3012 3013 qemu_iovec_reset(&iocb->iov); 3014 qemu_iovec_add(&iocb->iov, iocb->bounce, len); 3015 3016 iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, iocb->slba), 3017 &iocb->iov, 0, nvme_copy_out_cb, iocb); 3018 3019 return; 3020 3021 invalid: 3022 req->status = status; 3023 iocb->ret = -1; 3024 out: 3025 nvme_do_copy(iocb); 3026 } 3027 3028 static void nvme_copy_in_cb(void *opaque, int ret) 3029 { 3030 NvmeCopyAIOCB *iocb = opaque; 3031 NvmeRequest *req = iocb->req; 3032 NvmeNamespace *ns = req->ns; 3033 uint64_t slba; 3034 uint32_t nlb; 3035 3036 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { 3037 goto out; 3038 } 3039 3040 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, 3041 &nlb, NULL, NULL, NULL); 3042 3043 qemu_iovec_reset(&iocb->iov); 3044 qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(ns, nlb), 3045 nvme_m2b(ns, nlb)); 3046 3047 iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_moff(ns, slba), 3048 &iocb->iov, 0, nvme_copy_in_completed_cb, 3049 iocb); 3050 return; 3051 3052 out: 3053 nvme_copy_in_completed_cb(iocb, ret); 3054 } 3055 3056 static void nvme_do_copy(NvmeCopyAIOCB *iocb) 3057 { 3058 NvmeRequest *req = iocb->req; 3059 NvmeNamespace *ns = req->ns; 3060 uint64_t slba; 3061 uint32_t nlb; 3062 size_t len; 3063 uint16_t status; 3064 3065 if (iocb->ret < 0) { 3066 goto done; 
3067 } 3068 3069 if (iocb->idx == iocb->nr) { 3070 goto done; 3071 } 3072 3073 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, 3074 &nlb, NULL, NULL, NULL); 3075 len = nvme_l2b(ns, nlb); 3076 3077 trace_pci_nvme_copy_source_range(slba, nlb); 3078 3079 if (nlb > le16_to_cpu(ns->id_ns.mssrl)) { 3080 status = NVME_CMD_SIZE_LIMIT | NVME_DNR; 3081 goto invalid; 3082 } 3083 3084 status = nvme_check_bounds(ns, slba, nlb); 3085 if (status) { 3086 goto invalid; 3087 } 3088 3089 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { 3090 status = nvme_check_dulbe(ns, slba, nlb); 3091 if (status) { 3092 goto invalid; 3093 } 3094 } 3095 3096 if (ns->params.zoned) { 3097 status = nvme_check_zone_read(ns, slba, nlb); 3098 if (status) { 3099 goto invalid; 3100 } 3101 } 3102 3103 qemu_iovec_reset(&iocb->iov); 3104 qemu_iovec_add(&iocb->iov, iocb->bounce, len); 3105 3106 iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_l2b(ns, slba), 3107 &iocb->iov, 0, nvme_copy_in_cb, iocb); 3108 return; 3109 3110 invalid: 3111 req->status = status; 3112 iocb->ret = -1; 3113 done: 3114 nvme_copy_done(iocb); 3115 } 3116 3117 static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req) 3118 { 3119 NvmeNamespace *ns = req->ns; 3120 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; 3121 NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk, 3122 nvme_misc_cb, req); 3123 uint16_t nr = copy->nr + 1; 3124 uint8_t format = copy->control[0] & 0xf; 3125 uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); 3126 uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); 3127 size_t len = sizeof(NvmeCopySourceRangeFormat0); 3128 3129 uint16_t status; 3130 3131 trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format); 3132 3133 iocb->ranges = NULL; 3134 iocb->zone = NULL; 3135 3136 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && 3137 ((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) { 3138 status = NVME_INVALID_FIELD | NVME_DNR; 3139 goto invalid; 3140 } 3141 3142 if (!(n->id_ctrl.ocfs & (1 << format))) { 3143 trace_pci_nvme_err_copy_invalid_format(format); 3144 status = NVME_INVALID_FIELD | NVME_DNR; 3145 goto invalid; 3146 } 3147 3148 if (nr > ns->id_ns.msrc + 1) { 3149 status = NVME_CMD_SIZE_LIMIT | NVME_DNR; 3150 goto invalid; 3151 } 3152 3153 if ((ns->pif == 0x0 && format != 0x0) || 3154 (ns->pif != 0x0 && format != 0x1)) { 3155 status = NVME_INVALID_FORMAT | NVME_DNR; 3156 goto invalid; 3157 } 3158 3159 if (ns->pif) { 3160 len = sizeof(NvmeCopySourceRangeFormat1); 3161 } 3162 3163 iocb->format = format; 3164 iocb->ranges = g_malloc_n(nr, len); 3165 status = nvme_h2c(n, (uint8_t *)iocb->ranges, len * nr, req); 3166 if (status) { 3167 goto invalid; 3168 } 3169 3170 iocb->slba = le64_to_cpu(copy->sdlba); 3171 3172 if (ns->params.zoned) { 3173 iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba); 3174 if (!iocb->zone) { 3175 status = NVME_LBA_RANGE | NVME_DNR; 3176 goto invalid; 3177 } 3178 3179 status = nvme_zrm_auto(n, ns, iocb->zone); 3180 if (status) { 3181 goto invalid; 3182 } 3183 } 3184 3185 status = nvme_check_copy_mcl(ns, iocb, nr); 3186 if (status) { 3187 goto invalid; 3188 } 3189 3190 iocb->req = req; 3191 iocb->ret = 0; 3192 iocb->nr = nr; 3193 iocb->idx = 0; 3194 iocb->reftag = le32_to_cpu(copy->reftag); 3195 iocb->reftag |= (uint64_t)le32_to_cpu(copy->cdw3) << 32; 3196 iocb->bounce = g_malloc_n(le16_to_cpu(ns->id_ns.mssrl), 3197 ns->lbasz + ns->lbaf.ms); 3198 3199 qemu_iovec_init(&iocb->iov, 1); 3200 3201 block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.read, 0, 3202 
BLOCK_ACCT_READ); 3203 block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.write, 0, 3204 BLOCK_ACCT_WRITE); 3205 3206 req->aiocb = &iocb->common; 3207 nvme_do_copy(iocb); 3208 3209 return NVME_NO_COMPLETE; 3210 3211 invalid: 3212 g_free(iocb->ranges); 3213 qemu_aio_unref(iocb); 3214 return status; 3215 } 3216 3217 static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req) 3218 { 3219 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 3220 NvmeNamespace *ns = req->ns; 3221 BlockBackend *blk = ns->blkconf.blk; 3222 uint64_t slba = le64_to_cpu(rw->slba); 3223 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; 3224 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 3225 size_t data_len = nvme_l2b(ns, nlb); 3226 size_t len = data_len; 3227 int64_t offset = nvme_l2b(ns, slba); 3228 struct nvme_compare_ctx *ctx = NULL; 3229 uint16_t status; 3230 3231 trace_pci_nvme_compare(nvme_cid(req), nvme_nsid(ns), slba, nlb); 3232 3233 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (prinfo & NVME_PRINFO_PRACT)) { 3234 return NVME_INVALID_PROT_INFO | NVME_DNR; 3235 } 3236 3237 if (nvme_ns_ext(ns)) { 3238 len += nvme_m2b(ns, nlb); 3239 } 3240 3241 status = nvme_check_mdts(n, len); 3242 if (status) { 3243 return status; 3244 } 3245 3246 status = nvme_check_bounds(ns, slba, nlb); 3247 if (status) { 3248 return status; 3249 } 3250 3251 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { 3252 status = nvme_check_dulbe(ns, slba, nlb); 3253 if (status) { 3254 return status; 3255 } 3256 } 3257 3258 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); 3259 if (status) { 3260 return status; 3261 } 3262 3263 ctx = g_new(struct nvme_compare_ctx, 1); 3264 ctx->data.bounce = g_malloc(data_len); 3265 3266 req->opaque = ctx; 3267 3268 qemu_iovec_init(&ctx->data.iov, 1); 3269 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len); 3270 3271 block_acct_start(blk_get_stats(blk), &req->acct, data_len, 3272 BLOCK_ACCT_READ); 3273 req->aiocb = blk_aio_preadv(blk, offset, &ctx->data.iov, 0, 3274 nvme_compare_data_cb, req); 3275 3276 return NVME_NO_COMPLETE; 3277 } 3278 3279 typedef struct NvmeFlushAIOCB { 3280 BlockAIOCB common; 3281 BlockAIOCB *aiocb; 3282 NvmeRequest *req; 3283 int ret; 3284 3285 NvmeNamespace *ns; 3286 uint32_t nsid; 3287 bool broadcast; 3288 } NvmeFlushAIOCB; 3289 3290 static void nvme_flush_cancel(BlockAIOCB *acb) 3291 { 3292 NvmeFlushAIOCB *iocb = container_of(acb, NvmeFlushAIOCB, common); 3293 3294 iocb->ret = -ECANCELED; 3295 3296 if (iocb->aiocb) { 3297 blk_aio_cancel_async(iocb->aiocb); 3298 iocb->aiocb = NULL; 3299 } 3300 } 3301 3302 static const AIOCBInfo nvme_flush_aiocb_info = { 3303 .aiocb_size = sizeof(NvmeFlushAIOCB), 3304 .cancel_async = nvme_flush_cancel, 3305 .get_aio_context = nvme_get_aio_context, 3306 }; 3307 3308 static void nvme_do_flush(NvmeFlushAIOCB *iocb); 3309 3310 static void nvme_flush_ns_cb(void *opaque, int ret) 3311 { 3312 NvmeFlushAIOCB *iocb = opaque; 3313 NvmeNamespace *ns = iocb->ns; 3314 3315 if (ret < 0) { 3316 iocb->ret = ret; 3317 goto out; 3318 } else if (iocb->ret < 0) { 3319 goto out; 3320 } 3321 3322 if (ns) { 3323 trace_pci_nvme_flush_ns(iocb->nsid); 3324 3325 iocb->ns = NULL; 3326 iocb->aiocb = blk_aio_flush(ns->blkconf.blk, nvme_flush_ns_cb, iocb); 3327 return; 3328 } 3329 3330 out: 3331 nvme_do_flush(iocb); 3332 } 3333 3334 static void nvme_do_flush(NvmeFlushAIOCB *iocb) 3335 { 3336 NvmeRequest *req = iocb->req; 3337 NvmeCtrl *n = nvme_ctrl(req); 3338 int i; 3339 3340 if (iocb->ret < 0) { 3341 goto done; 3342 } 3343 3344 if (iocb->broadcast) { 3345 for (i = iocb->nsid + 
1; i <= NVME_MAX_NAMESPACES; i++) { 3346 iocb->ns = nvme_ns(n, i); 3347 if (iocb->ns) { 3348 iocb->nsid = i; 3349 break; 3350 } 3351 } 3352 } 3353 3354 if (!iocb->ns) { 3355 goto done; 3356 } 3357 3358 nvme_flush_ns_cb(iocb, 0); 3359 return; 3360 3361 done: 3362 iocb->common.cb(iocb->common.opaque, iocb->ret); 3363 qemu_aio_unref(iocb); 3364 } 3365 3366 static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req) 3367 { 3368 NvmeFlushAIOCB *iocb; 3369 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 3370 uint16_t status; 3371 3372 iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req); 3373 3374 iocb->req = req; 3375 iocb->ret = 0; 3376 iocb->ns = NULL; 3377 iocb->nsid = 0; 3378 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); 3379 3380 if (!iocb->broadcast) { 3381 if (!nvme_nsid_valid(n, nsid)) { 3382 status = NVME_INVALID_NSID | NVME_DNR; 3383 goto out; 3384 } 3385 3386 iocb->ns = nvme_ns(n, nsid); 3387 if (!iocb->ns) { 3388 status = NVME_INVALID_FIELD | NVME_DNR; 3389 goto out; 3390 } 3391 3392 iocb->nsid = nsid; 3393 } 3394 3395 req->aiocb = &iocb->common; 3396 nvme_do_flush(iocb); 3397 3398 return NVME_NO_COMPLETE; 3399 3400 out: 3401 qemu_aio_unref(iocb); 3402 3403 return status; 3404 } 3405 3406 static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) 3407 { 3408 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 3409 NvmeNamespace *ns = req->ns; 3410 uint64_t slba = le64_to_cpu(rw->slba); 3411 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; 3412 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); 3413 uint64_t data_size = nvme_l2b(ns, nlb); 3414 uint64_t mapped_size = data_size; 3415 uint64_t data_offset; 3416 BlockBackend *blk = ns->blkconf.blk; 3417 uint16_t status; 3418 3419 if (nvme_ns_ext(ns)) { 3420 mapped_size += nvme_m2b(ns, nlb); 3421 3422 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3423 bool pract = prinfo & NVME_PRINFO_PRACT; 3424 3425 if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { 3426 mapped_size = data_size; 3427 } 3428 } 3429 } 3430 3431 trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, mapped_size, slba); 3432 3433 status = nvme_check_mdts(n, mapped_size); 3434 if (status) { 3435 goto invalid; 3436 } 3437 3438 status = nvme_check_bounds(ns, slba, nlb); 3439 if (status) { 3440 goto invalid; 3441 } 3442 3443 if (ns->params.zoned) { 3444 status = nvme_check_zone_read(ns, slba, nlb); 3445 if (status) { 3446 trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status); 3447 goto invalid; 3448 } 3449 } 3450 3451 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { 3452 status = nvme_check_dulbe(ns, slba, nlb); 3453 if (status) { 3454 goto invalid; 3455 } 3456 } 3457 3458 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3459 return nvme_dif_rw(n, req); 3460 } 3461 3462 status = nvme_map_data(n, nlb, req); 3463 if (status) { 3464 goto invalid; 3465 } 3466 3467 data_offset = nvme_l2b(ns, slba); 3468 3469 block_acct_start(blk_get_stats(blk), &req->acct, data_size, 3470 BLOCK_ACCT_READ); 3471 nvme_blk_read(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req); 3472 return NVME_NO_COMPLETE; 3473 3474 invalid: 3475 block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ); 3476 return status | NVME_DNR; 3477 } 3478 3479 static void nvme_do_write_fdp(NvmeCtrl *n, NvmeRequest *req, uint64_t slba, 3480 uint32_t nlb) 3481 { 3482 NvmeNamespace *ns = req->ns; 3483 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 3484 uint64_t data_size = nvme_l2b(ns, nlb); 3485 uint32_t dw12 = le32_to_cpu(req->cmd.cdw12); 3486 uint8_t dtype = (dw12 >> 20) & 0xf; 3487 uint16_t pid = le16_to_cpu(rw->dspec); 3488 
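    /*
     * rw->dspec carries the placement identifier; if the Data Placement
     * directive is not in use (or the pid does not parse), fall back to
     * placement handle 0 in reclaim group 0 below.
     */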
uint16_t ph, rg, ruhid; 3489 NvmeReclaimUnit *ru; 3490 3491 if (dtype != NVME_DIRECTIVE_DATA_PLACEMENT || 3492 !nvme_parse_pid(ns, pid, &ph, &rg)) { 3493 ph = 0; 3494 rg = 0; 3495 } 3496 3497 ruhid = ns->fdp.phs[ph]; 3498 ru = &ns->endgrp->fdp.ruhs[ruhid].rus[rg]; 3499 3500 nvme_fdp_stat_inc(&ns->endgrp->fdp.hbmw, data_size); 3501 nvme_fdp_stat_inc(&ns->endgrp->fdp.mbmw, data_size); 3502 3503 while (nlb) { 3504 if (nlb < ru->ruamw) { 3505 ru->ruamw -= nlb; 3506 break; 3507 } 3508 3509 nlb -= ru->ruamw; 3510 nvme_update_ruh(n, ns, pid); 3511 } 3512 } 3513 3514 static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, 3515 bool wrz) 3516 { 3517 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; 3518 NvmeNamespace *ns = req->ns; 3519 uint64_t slba = le64_to_cpu(rw->slba); 3520 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; 3521 uint16_t ctrl = le16_to_cpu(rw->control); 3522 uint8_t prinfo = NVME_RW_PRINFO(ctrl); 3523 uint64_t data_size = nvme_l2b(ns, nlb); 3524 uint64_t mapped_size = data_size; 3525 uint64_t data_offset; 3526 NvmeZone *zone; 3527 NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe; 3528 BlockBackend *blk = ns->blkconf.blk; 3529 uint16_t status; 3530 3531 if (nvme_ns_ext(ns)) { 3532 mapped_size += nvme_m2b(ns, nlb); 3533 3534 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3535 bool pract = prinfo & NVME_PRINFO_PRACT; 3536 3537 if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { 3538 mapped_size -= nvme_m2b(ns, nlb); 3539 } 3540 } 3541 } 3542 3543 trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode), 3544 nvme_nsid(ns), nlb, mapped_size, slba); 3545 3546 if (!wrz) { 3547 status = nvme_check_mdts(n, mapped_size); 3548 if (status) { 3549 goto invalid; 3550 } 3551 } 3552 3553 status = nvme_check_bounds(ns, slba, nlb); 3554 if (status) { 3555 goto invalid; 3556 } 3557 3558 if (ns->params.zoned) { 3559 zone = nvme_get_zone_by_slba(ns, slba); 3560 assert(zone); 3561 3562 if (append) { 3563 bool piremap = !!(ctrl & NVME_RW_PIREMAP); 3564 3565 if (unlikely(zone->d.za & NVME_ZA_ZRWA_VALID)) { 3566 return NVME_INVALID_ZONE_OP | NVME_DNR; 3567 } 3568 3569 if (unlikely(slba != zone->d.zslba)) { 3570 trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba); 3571 status = NVME_INVALID_FIELD; 3572 goto invalid; 3573 } 3574 3575 if (n->params.zasl && 3576 data_size > (uint64_t)n->page_size << n->params.zasl) { 3577 trace_pci_nvme_err_zasl(data_size); 3578 return NVME_INVALID_FIELD | NVME_DNR; 3579 } 3580 3581 slba = zone->w_ptr; 3582 rw->slba = cpu_to_le64(slba); 3583 res->slba = cpu_to_le64(slba); 3584 3585 switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3586 case NVME_ID_NS_DPS_TYPE_1: 3587 if (!piremap) { 3588 return NVME_INVALID_PROT_INFO | NVME_DNR; 3589 } 3590 3591 /* fallthrough */ 3592 3593 case NVME_ID_NS_DPS_TYPE_2: 3594 if (piremap) { 3595 uint32_t reftag = le32_to_cpu(rw->reftag); 3596 rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba)); 3597 } 3598 3599 break; 3600 3601 case NVME_ID_NS_DPS_TYPE_3: 3602 if (piremap) { 3603 return NVME_INVALID_PROT_INFO | NVME_DNR; 3604 } 3605 3606 break; 3607 } 3608 } 3609 3610 status = nvme_check_zone_write(ns, zone, slba, nlb); 3611 if (status) { 3612 goto invalid; 3613 } 3614 3615 status = nvme_zrm_auto(n, ns, zone); 3616 if (status) { 3617 goto invalid; 3618 } 3619 3620 if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { 3621 zone->w_ptr += nlb; 3622 } 3623 } else if (ns->endgrp && ns->endgrp->fdp.enabled) { 3624 nvme_do_write_fdp(n, req, slba, nlb); 3625 } 3626 3627 data_offset = nvme_l2b(ns, slba); 3628 3629 if 
(NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { 3630 return nvme_dif_rw(n, req); 3631 } 3632 3633 if (!wrz) { 3634 status = nvme_map_data(n, nlb, req); 3635 if (status) { 3636 goto invalid; 3637 } 3638 3639 block_acct_start(blk_get_stats(blk), &req->acct, data_size, 3640 BLOCK_ACCT_WRITE); 3641 nvme_blk_write(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req); 3642 } else { 3643 req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size, 3644 BDRV_REQ_MAY_UNMAP, nvme_rw_cb, 3645 req); 3646 } 3647 3648 return NVME_NO_COMPLETE; 3649 3650 invalid: 3651 block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE); 3652 return status | NVME_DNR; 3653 } 3654 3655 static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req) 3656 { 3657 return nvme_do_write(n, req, false, false); 3658 } 3659 3660 static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req) 3661 { 3662 return nvme_do_write(n, req, false, true); 3663 } 3664 3665 static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req) 3666 { 3667 return nvme_do_write(n, req, true, false); 3668 } 3669 3670 static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c, 3671 uint64_t *slba, uint32_t *zone_idx) 3672 { 3673 uint32_t dw10 = le32_to_cpu(c->cdw10); 3674 uint32_t dw11 = le32_to_cpu(c->cdw11); 3675 3676 if (!ns->params.zoned) { 3677 trace_pci_nvme_err_invalid_opc(c->opcode); 3678 return NVME_INVALID_OPCODE | NVME_DNR; 3679 } 3680 3681 *slba = ((uint64_t)dw11) << 32 | dw10; 3682 if (unlikely(*slba >= ns->id_ns.nsze)) { 3683 trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze); 3684 *slba = 0; 3685 return NVME_LBA_RANGE | NVME_DNR; 3686 } 3687 3688 *zone_idx = nvme_zone_idx(ns, *slba); 3689 assert(*zone_idx < ns->num_zones); 3690 3691 return NVME_SUCCESS; 3692 } 3693 3694 typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState, 3695 NvmeRequest *); 3696 3697 enum NvmeZoneProcessingMask { 3698 NVME_PROC_CURRENT_ZONE = 0, 3699 NVME_PROC_OPENED_ZONES = 1 << 0, 3700 NVME_PROC_CLOSED_ZONES = 1 << 1, 3701 NVME_PROC_READ_ONLY_ZONES = 1 << 2, 3702 NVME_PROC_FULL_ZONES = 1 << 3, 3703 }; 3704 3705 static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone, 3706 NvmeZoneState state, NvmeRequest *req) 3707 { 3708 NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; 3709 int flags = 0; 3710 3711 if (cmd->zsflags & NVME_ZSFLAG_ZRWA_ALLOC) { 3712 uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); 3713 3714 if (!(ozcs & NVME_ID_NS_ZONED_OZCS_ZRWASUP)) { 3715 return NVME_INVALID_ZONE_OP | NVME_DNR; 3716 } 3717 3718 if (zone->w_ptr % ns->zns.zrwafg) { 3719 return NVME_NOZRWA | NVME_DNR; 3720 } 3721 3722 flags = NVME_ZRM_ZRWA; 3723 } 3724 3725 return nvme_zrm_open_flags(nvme_ctrl(req), ns, zone, flags); 3726 } 3727 3728 static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone, 3729 NvmeZoneState state, NvmeRequest *req) 3730 { 3731 return nvme_zrm_close(ns, zone); 3732 } 3733 3734 static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone, 3735 NvmeZoneState state, NvmeRequest *req) 3736 { 3737 return nvme_zrm_finish(ns, zone); 3738 } 3739 3740 static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone, 3741 NvmeZoneState state, NvmeRequest *req) 3742 { 3743 switch (state) { 3744 case NVME_ZONE_STATE_READ_ONLY: 3745 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE); 3746 /* fall through */ 3747 case NVME_ZONE_STATE_OFFLINE: 3748 return NVME_SUCCESS; 3749 default: 3750 return NVME_ZONE_INVAL_TRANSITION; 3751 } 3752 } 3753 3754 static uint16_t 
nvme_set_zd_ext(NvmeNamespace *ns, NvmeZone *zone) 3755 { 3756 uint16_t status; 3757 uint8_t state = nvme_get_zone_state(zone); 3758 3759 if (state == NVME_ZONE_STATE_EMPTY) { 3760 status = nvme_aor_check(ns, 1, 0); 3761 if (status) { 3762 return status; 3763 } 3764 nvme_aor_inc_active(ns); 3765 zone->d.za |= NVME_ZA_ZD_EXT_VALID; 3766 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED); 3767 return NVME_SUCCESS; 3768 } 3769 3770 return NVME_ZONE_INVAL_TRANSITION; 3771 } 3772 3773 static uint16_t nvme_bulk_proc_zone(NvmeNamespace *ns, NvmeZone *zone, 3774 enum NvmeZoneProcessingMask proc_mask, 3775 op_handler_t op_hndlr, NvmeRequest *req) 3776 { 3777 uint16_t status = NVME_SUCCESS; 3778 NvmeZoneState zs = nvme_get_zone_state(zone); 3779 bool proc_zone; 3780 3781 switch (zs) { 3782 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 3783 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 3784 proc_zone = proc_mask & NVME_PROC_OPENED_ZONES; 3785 break; 3786 case NVME_ZONE_STATE_CLOSED: 3787 proc_zone = proc_mask & NVME_PROC_CLOSED_ZONES; 3788 break; 3789 case NVME_ZONE_STATE_READ_ONLY: 3790 proc_zone = proc_mask & NVME_PROC_READ_ONLY_ZONES; 3791 break; 3792 case NVME_ZONE_STATE_FULL: 3793 proc_zone = proc_mask & NVME_PROC_FULL_ZONES; 3794 break; 3795 default: 3796 proc_zone = false; 3797 } 3798 3799 if (proc_zone) { 3800 status = op_hndlr(ns, zone, zs, req); 3801 } 3802 3803 return status; 3804 } 3805 3806 static uint16_t nvme_do_zone_op(NvmeNamespace *ns, NvmeZone *zone, 3807 enum NvmeZoneProcessingMask proc_mask, 3808 op_handler_t op_hndlr, NvmeRequest *req) 3809 { 3810 NvmeZone *next; 3811 uint16_t status = NVME_SUCCESS; 3812 int i; 3813 3814 if (!proc_mask) { 3815 status = op_hndlr(ns, zone, nvme_get_zone_state(zone), req); 3816 } else { 3817 if (proc_mask & NVME_PROC_CLOSED_ZONES) { 3818 QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) { 3819 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3820 req); 3821 if (status && status != NVME_NO_COMPLETE) { 3822 goto out; 3823 } 3824 } 3825 } 3826 if (proc_mask & NVME_PROC_OPENED_ZONES) { 3827 QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) { 3828 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3829 req); 3830 if (status && status != NVME_NO_COMPLETE) { 3831 goto out; 3832 } 3833 } 3834 3835 QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) { 3836 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3837 req); 3838 if (status && status != NVME_NO_COMPLETE) { 3839 goto out; 3840 } 3841 } 3842 } 3843 if (proc_mask & NVME_PROC_FULL_ZONES) { 3844 QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) { 3845 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3846 req); 3847 if (status && status != NVME_NO_COMPLETE) { 3848 goto out; 3849 } 3850 } 3851 } 3852 3853 if (proc_mask & NVME_PROC_READ_ONLY_ZONES) { 3854 for (i = 0; i < ns->num_zones; i++, zone++) { 3855 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, 3856 req); 3857 if (status && status != NVME_NO_COMPLETE) { 3858 goto out; 3859 } 3860 } 3861 } 3862 } 3863 3864 out: 3865 return status; 3866 } 3867 3868 typedef struct NvmeZoneResetAIOCB { 3869 BlockAIOCB common; 3870 BlockAIOCB *aiocb; 3871 NvmeRequest *req; 3872 int ret; 3873 3874 bool all; 3875 int idx; 3876 NvmeZone *zone; 3877 } NvmeZoneResetAIOCB; 3878 3879 static void nvme_zone_reset_cancel(BlockAIOCB *aiocb) 3880 { 3881 NvmeZoneResetAIOCB *iocb = container_of(aiocb, NvmeZoneResetAIOCB, common); 3882 NvmeRequest *req = iocb->req; 3883 NvmeNamespace *ns = req->ns; 3884 3885 
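    /* force the zone loop in nvme_zone_reset_cb() to terminate */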
iocb->idx = ns->num_zones; 3886 3887 iocb->ret = -ECANCELED; 3888 3889 if (iocb->aiocb) { 3890 blk_aio_cancel_async(iocb->aiocb); 3891 iocb->aiocb = NULL; 3892 } 3893 } 3894 3895 static const AIOCBInfo nvme_zone_reset_aiocb_info = { 3896 .aiocb_size = sizeof(NvmeZoneResetAIOCB), 3897 .cancel_async = nvme_zone_reset_cancel, 3898 }; 3899 3900 static void nvme_zone_reset_cb(void *opaque, int ret); 3901 3902 static void nvme_zone_reset_epilogue_cb(void *opaque, int ret) 3903 { 3904 NvmeZoneResetAIOCB *iocb = opaque; 3905 NvmeRequest *req = iocb->req; 3906 NvmeNamespace *ns = req->ns; 3907 int64_t moff; 3908 int count; 3909 3910 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { 3911 goto out; 3912 } 3913 3914 moff = nvme_moff(ns, iocb->zone->d.zslba); 3915 count = nvme_m2b(ns, ns->zone_size); 3916 3917 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, moff, count, 3918 BDRV_REQ_MAY_UNMAP, 3919 nvme_zone_reset_cb, iocb); 3920 return; 3921 3922 out: 3923 nvme_zone_reset_cb(iocb, ret); 3924 } 3925 3926 static void nvme_zone_reset_cb(void *opaque, int ret) 3927 { 3928 NvmeZoneResetAIOCB *iocb = opaque; 3929 NvmeRequest *req = iocb->req; 3930 NvmeNamespace *ns = req->ns; 3931 3932 if (iocb->ret < 0) { 3933 goto done; 3934 } else if (ret < 0) { 3935 iocb->ret = ret; 3936 goto done; 3937 } 3938 3939 if (iocb->zone) { 3940 nvme_zrm_reset(ns, iocb->zone); 3941 3942 if (!iocb->all) { 3943 goto done; 3944 } 3945 } 3946 3947 while (iocb->idx < ns->num_zones) { 3948 NvmeZone *zone = &ns->zone_array[iocb->idx++]; 3949 3950 switch (nvme_get_zone_state(zone)) { 3951 case NVME_ZONE_STATE_EMPTY: 3952 if (!iocb->all) { 3953 goto done; 3954 } 3955 3956 continue; 3957 3958 case NVME_ZONE_STATE_EXPLICITLY_OPEN: 3959 case NVME_ZONE_STATE_IMPLICITLY_OPEN: 3960 case NVME_ZONE_STATE_CLOSED: 3961 case NVME_ZONE_STATE_FULL: 3962 iocb->zone = zone; 3963 break; 3964 3965 default: 3966 continue; 3967 } 3968 3969 trace_pci_nvme_zns_zone_reset(zone->d.zslba); 3970 3971 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, 3972 nvme_l2b(ns, zone->d.zslba), 3973 nvme_l2b(ns, ns->zone_size), 3974 BDRV_REQ_MAY_UNMAP, 3975 nvme_zone_reset_epilogue_cb, 3976 iocb); 3977 return; 3978 } 3979 3980 done: 3981 iocb->aiocb = NULL; 3982 3983 iocb->common.cb(iocb->common.opaque, iocb->ret); 3984 qemu_aio_unref(iocb); 3985 } 3986 3987 static uint16_t nvme_zone_mgmt_send_zrwa_flush(NvmeCtrl *n, NvmeZone *zone, 3988 uint64_t elba, NvmeRequest *req) 3989 { 3990 NvmeNamespace *ns = req->ns; 3991 uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); 3992 uint64_t wp = zone->d.wp; 3993 uint32_t nlb = elba - wp + 1; 3994 uint16_t status; 3995 3996 3997 if (!(ozcs & NVME_ID_NS_ZONED_OZCS_ZRWASUP)) { 3998 return NVME_INVALID_ZONE_OP | NVME_DNR; 3999 } 4000 4001 if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { 4002 return NVME_INVALID_FIELD | NVME_DNR; 4003 } 4004 4005 if (elba < wp || elba > wp + ns->zns.zrwas) { 4006 return NVME_ZONE_BOUNDARY_ERROR | NVME_DNR; 4007 } 4008 4009 if (nlb % ns->zns.zrwafg) { 4010 return NVME_INVALID_FIELD | NVME_DNR; 4011 } 4012 4013 status = nvme_zrm_auto(n, ns, zone); 4014 if (status) { 4015 return status; 4016 } 4017 4018 zone->w_ptr += nlb; 4019 4020 nvme_advance_zone_wp(ns, zone, nlb); 4021 4022 return NVME_SUCCESS; 4023 } 4024 4025 static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req) 4026 { 4027 NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; 4028 NvmeNamespace *ns = req->ns; 4029 NvmeZone *zone; 4030 NvmeZoneResetAIOCB *iocb; 4031 uint8_t *zd_ext; 4032 uint64_t slba = 0; 4033 uint32_t zone_idx = 0; 4034 
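    /* slba and zone_idx are only resolved when the Select All flag is clear */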
uint16_t status; 4035 uint8_t action = cmd->zsa; 4036 bool all; 4037 enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE; 4038 4039 all = cmd->zsflags & NVME_ZSFLAG_SELECT_ALL; 4040 4041 req->status = NVME_SUCCESS; 4042 4043 if (!all) { 4044 status = nvme_get_mgmt_zone_slba_idx(ns, &req->cmd, &slba, &zone_idx); 4045 if (status) { 4046 return status; 4047 } 4048 } 4049 4050 zone = &ns->zone_array[zone_idx]; 4051 if (slba != zone->d.zslba && action != NVME_ZONE_ACTION_ZRWA_FLUSH) { 4052 trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba); 4053 return NVME_INVALID_FIELD | NVME_DNR; 4054 } 4055 4056 switch (action) { 4057 4058 case NVME_ZONE_ACTION_OPEN: 4059 if (all) { 4060 proc_mask = NVME_PROC_CLOSED_ZONES; 4061 } 4062 trace_pci_nvme_open_zone(slba, zone_idx, all); 4063 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_open_zone, req); 4064 break; 4065 4066 case NVME_ZONE_ACTION_CLOSE: 4067 if (all) { 4068 proc_mask = NVME_PROC_OPENED_ZONES; 4069 } 4070 trace_pci_nvme_close_zone(slba, zone_idx, all); 4071 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_close_zone, req); 4072 break; 4073 4074 case NVME_ZONE_ACTION_FINISH: 4075 if (all) { 4076 proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES; 4077 } 4078 trace_pci_nvme_finish_zone(slba, zone_idx, all); 4079 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_finish_zone, req); 4080 break; 4081 4082 case NVME_ZONE_ACTION_RESET: 4083 trace_pci_nvme_reset_zone(slba, zone_idx, all); 4084 4085 iocb = blk_aio_get(&nvme_zone_reset_aiocb_info, ns->blkconf.blk, 4086 nvme_misc_cb, req); 4087 4088 iocb->req = req; 4089 iocb->ret = 0; 4090 iocb->all = all; 4091 iocb->idx = zone_idx; 4092 iocb->zone = NULL; 4093 4094 req->aiocb = &iocb->common; 4095 nvme_zone_reset_cb(iocb, 0); 4096 4097 return NVME_NO_COMPLETE; 4098 4099 case NVME_ZONE_ACTION_OFFLINE: 4100 if (all) { 4101 proc_mask = NVME_PROC_READ_ONLY_ZONES; 4102 } 4103 trace_pci_nvme_offline_zone(slba, zone_idx, all); 4104 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_offline_zone, req); 4105 break; 4106 4107 case NVME_ZONE_ACTION_SET_ZD_EXT: 4108 trace_pci_nvme_set_descriptor_extension(slba, zone_idx); 4109 if (all || !ns->params.zd_extension_size) { 4110 return NVME_INVALID_FIELD | NVME_DNR; 4111 } 4112 zd_ext = nvme_get_zd_extension(ns, zone_idx); 4113 status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req); 4114 if (status) { 4115 trace_pci_nvme_err_zd_extension_map_error(zone_idx); 4116 return status; 4117 } 4118 4119 status = nvme_set_zd_ext(ns, zone); 4120 if (status == NVME_SUCCESS) { 4121 trace_pci_nvme_zd_extension_set(zone_idx); 4122 return status; 4123 } 4124 break; 4125 4126 case NVME_ZONE_ACTION_ZRWA_FLUSH: 4127 if (all) { 4128 return NVME_INVALID_FIELD | NVME_DNR; 4129 } 4130 4131 return nvme_zone_mgmt_send_zrwa_flush(n, zone, slba, req); 4132 4133 default: 4134 trace_pci_nvme_err_invalid_mgmt_action(action); 4135 status = NVME_INVALID_FIELD; 4136 } 4137 4138 if (status == NVME_ZONE_INVAL_TRANSITION) { 4139 trace_pci_nvme_err_invalid_zone_state_transition(action, slba, 4140 zone->d.za); 4141 } 4142 if (status) { 4143 status |= NVME_DNR; 4144 } 4145 4146 return status; 4147 } 4148 4149 static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl) 4150 { 4151 NvmeZoneState zs = nvme_get_zone_state(zl); 4152 4153 switch (zafs) { 4154 case NVME_ZONE_REPORT_ALL: 4155 return true; 4156 case NVME_ZONE_REPORT_EMPTY: 4157 return zs == NVME_ZONE_STATE_EMPTY; 4158 case NVME_ZONE_REPORT_IMPLICITLY_OPEN: 4159 return zs == 
NVME_ZONE_STATE_IMPLICITLY_OPEN; 4160 case NVME_ZONE_REPORT_EXPLICITLY_OPEN: 4161 return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN; 4162 case NVME_ZONE_REPORT_CLOSED: 4163 return zs == NVME_ZONE_STATE_CLOSED; 4164 case NVME_ZONE_REPORT_FULL: 4165 return zs == NVME_ZONE_STATE_FULL; 4166 case NVME_ZONE_REPORT_READ_ONLY: 4167 return zs == NVME_ZONE_STATE_READ_ONLY; 4168 case NVME_ZONE_REPORT_OFFLINE: 4169 return zs == NVME_ZONE_STATE_OFFLINE; 4170 default: 4171 return false; 4172 } 4173 } 4174 4175 static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) 4176 { 4177 NvmeCmd *cmd = (NvmeCmd *)&req->cmd; 4178 NvmeNamespace *ns = req->ns; 4179 /* cdw12 is zero-based number of dwords to return. Convert to bytes */ 4180 uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2; 4181 uint32_t dw13 = le32_to_cpu(cmd->cdw13); 4182 uint32_t zone_idx, zra, zrasf, partial; 4183 uint64_t max_zones, nr_zones = 0; 4184 uint16_t status; 4185 uint64_t slba; 4186 NvmeZoneDescr *z; 4187 NvmeZone *zone; 4188 NvmeZoneReportHeader *header; 4189 void *buf, *buf_p; 4190 size_t zone_entry_sz; 4191 int i; 4192 4193 req->status = NVME_SUCCESS; 4194 4195 status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx); 4196 if (status) { 4197 return status; 4198 } 4199 4200 zra = dw13 & 0xff; 4201 if (zra != NVME_ZONE_REPORT && zra != NVME_ZONE_REPORT_EXTENDED) { 4202 return NVME_INVALID_FIELD | NVME_DNR; 4203 } 4204 if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) { 4205 return NVME_INVALID_FIELD | NVME_DNR; 4206 } 4207 4208 zrasf = (dw13 >> 8) & 0xff; 4209 if (zrasf > NVME_ZONE_REPORT_OFFLINE) { 4210 return NVME_INVALID_FIELD | NVME_DNR; 4211 } 4212 4213 if (data_size < sizeof(NvmeZoneReportHeader)) { 4214 return NVME_INVALID_FIELD | NVME_DNR; 4215 } 4216 4217 status = nvme_check_mdts(n, data_size); 4218 if (status) { 4219 return status; 4220 } 4221 4222 partial = (dw13 >> 16) & 0x01; 4223 4224 zone_entry_sz = sizeof(NvmeZoneDescr); 4225 if (zra == NVME_ZONE_REPORT_EXTENDED) { 4226 zone_entry_sz += ns->params.zd_extension_size; 4227 } 4228 4229 max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz; 4230 buf = g_malloc0(data_size); 4231 4232 zone = &ns->zone_array[zone_idx]; 4233 for (i = zone_idx; i < ns->num_zones; i++) { 4234 if (partial && nr_zones >= max_zones) { 4235 break; 4236 } 4237 if (nvme_zone_matches_filter(zrasf, zone++)) { 4238 nr_zones++; 4239 } 4240 } 4241 header = buf; 4242 header->nr_zones = cpu_to_le64(nr_zones); 4243 4244 buf_p = buf + sizeof(NvmeZoneReportHeader); 4245 for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) { 4246 zone = &ns->zone_array[zone_idx]; 4247 if (nvme_zone_matches_filter(zrasf, zone)) { 4248 z = buf_p; 4249 buf_p += sizeof(NvmeZoneDescr); 4250 4251 z->zt = zone->d.zt; 4252 z->zs = zone->d.zs; 4253 z->zcap = cpu_to_le64(zone->d.zcap); 4254 z->zslba = cpu_to_le64(zone->d.zslba); 4255 z->za = zone->d.za; 4256 4257 if (nvme_wp_is_valid(zone)) { 4258 z->wp = cpu_to_le64(zone->d.wp); 4259 } else { 4260 z->wp = cpu_to_le64(~0ULL); 4261 } 4262 4263 if (zra == NVME_ZONE_REPORT_EXTENDED) { 4264 if (zone->d.za & NVME_ZA_ZD_EXT_VALID) { 4265 memcpy(buf_p, nvme_get_zd_extension(ns, zone_idx), 4266 ns->params.zd_extension_size); 4267 } 4268 buf_p += ns->params.zd_extension_size; 4269 } 4270 4271 max_zones--; 4272 } 4273 } 4274 4275 status = nvme_c2h(n, (uint8_t *)buf, data_size, req); 4276 4277 g_free(buf); 4278 4279 return status; 4280 } 4281 4282 static uint16_t nvme_io_mgmt_recv_ruhs(NvmeCtrl *n, NvmeRequest *req, 4283 size_t len) 
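/*
 * Reclaim Unit Handle Status: one descriptor is returned for each placement
 * handle in each reclaim group of the endurance group.
 */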
4284 { 4285 NvmeNamespace *ns = req->ns; 4286 NvmeEnduranceGroup *endgrp; 4287 NvmeRuhStatus *hdr; 4288 NvmeRuhStatusDescr *ruhsd; 4289 unsigned int nruhsd; 4290 uint16_t rg, ph, *ruhid; 4291 size_t trans_len; 4292 g_autofree uint8_t *buf = NULL; 4293 4294 if (!n->subsys) { 4295 return NVME_INVALID_FIELD | NVME_DNR; 4296 } 4297 4298 if (ns->params.nsid == 0 || ns->params.nsid == 0xffffffff) { 4299 return NVME_INVALID_NSID | NVME_DNR; 4300 } 4301 4302 if (!n->subsys->endgrp.fdp.enabled) { 4303 return NVME_FDP_DISABLED | NVME_DNR; 4304 } 4305 4306 endgrp = ns->endgrp; 4307 4308 nruhsd = ns->fdp.nphs * endgrp->fdp.nrg; 4309 trans_len = sizeof(NvmeRuhStatus) + nruhsd * sizeof(NvmeRuhStatusDescr); 4310 buf = g_malloc(trans_len); 4311 4312 trans_len = MIN(trans_len, len); 4313 4314 hdr = (NvmeRuhStatus *)buf; 4315 ruhsd = (NvmeRuhStatusDescr *)(buf + sizeof(NvmeRuhStatus)); 4316 4317 hdr->nruhsd = cpu_to_le16(nruhsd); 4318 4319 ruhid = ns->fdp.phs; 4320 4321 for (ph = 0; ph < ns->fdp.nphs; ph++, ruhid++) { 4322 NvmeRuHandle *ruh = &endgrp->fdp.ruhs[*ruhid]; 4323 4324 for (rg = 0; rg < endgrp->fdp.nrg; rg++, ruhsd++) { 4325 uint16_t pid = nvme_make_pid(ns, rg, ph); 4326 4327 ruhsd->pid = cpu_to_le16(pid); 4328 ruhsd->ruhid = *ruhid; 4329 ruhsd->earutr = 0; 4330 ruhsd->ruamw = cpu_to_le64(ruh->rus[rg].ruamw); 4331 } 4332 } 4333 4334 return nvme_c2h(n, buf, trans_len, req); 4335 } 4336 4337 static uint16_t nvme_io_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) 4338 { 4339 NvmeCmd *cmd = &req->cmd; 4340 uint32_t cdw10 = le32_to_cpu(cmd->cdw10); 4341 uint32_t numd = le32_to_cpu(cmd->cdw11); 4342 uint8_t mo = (cdw10 & 0xff); 4343 size_t len = (numd + 1) << 2; 4344 4345 switch (mo) { 4346 case NVME_IOMR_MO_NOP: 4347 return 0; 4348 case NVME_IOMR_MO_RUH_STATUS: 4349 return nvme_io_mgmt_recv_ruhs(n, req, len); 4350 default: 4351 return NVME_INVALID_FIELD | NVME_DNR; 4352 }; 4353 } 4354 4355 static uint16_t nvme_io_mgmt_send_ruh_update(NvmeCtrl *n, NvmeRequest *req) 4356 { 4357 NvmeCmd *cmd = &req->cmd; 4358 NvmeNamespace *ns = req->ns; 4359 uint32_t cdw10 = le32_to_cpu(cmd->cdw10); 4360 uint16_t ret = NVME_SUCCESS; 4361 uint32_t npid = (cdw10 >> 1) + 1; 4362 unsigned int i = 0; 4363 g_autofree uint16_t *pids = NULL; 4364 uint32_t maxnpid = n->subsys->endgrp.fdp.nrg * n->subsys->endgrp.fdp.nruh; 4365 4366 if (unlikely(npid >= MIN(NVME_FDP_MAXPIDS, maxnpid))) { 4367 return NVME_INVALID_FIELD | NVME_DNR; 4368 } 4369 4370 pids = g_new(uint16_t, npid); 4371 4372 ret = nvme_h2c(n, pids, npid * sizeof(uint16_t), req); 4373 if (ret) { 4374 return ret; 4375 } 4376 4377 for (; i < npid; i++) { 4378 if (!nvme_update_ruh(n, ns, pids[i])) { 4379 return NVME_INVALID_FIELD | NVME_DNR; 4380 } 4381 } 4382 4383 return ret; 4384 } 4385 4386 static uint16_t nvme_io_mgmt_send(NvmeCtrl *n, NvmeRequest *req) 4387 { 4388 NvmeCmd *cmd = &req->cmd; 4389 uint32_t cdw10 = le32_to_cpu(cmd->cdw10); 4390 uint8_t mo = (cdw10 & 0xff); 4391 4392 switch (mo) { 4393 case NVME_IOMS_MO_NOP: 4394 return 0; 4395 case NVME_IOMS_MO_RUH_UPDATE: 4396 return nvme_io_mgmt_send_ruh_update(n, req); 4397 default: 4398 return NVME_INVALID_FIELD | NVME_DNR; 4399 }; 4400 } 4401 4402 static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) 4403 { 4404 NvmeNamespace *ns; 4405 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 4406 4407 trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req), 4408 req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode)); 4409 4410 if (!nvme_nsid_valid(n, nsid)) { 4411 return NVME_INVALID_NSID | NVME_DNR; 4412 } 4413 4414 /* 4415 * In the 
base NVM command set, Flush may apply to all namespaces 4416 * (indicated by NSID being set to FFFFFFFFh). But if that feature is used 4417 * along with TP 4056 (Namespace Types), it may be pretty screwed up. 4418 * 4419 * If NSID is indeed set to FFFFFFFFh, we simply cannot associate the 4420 * opcode with a specific command since we cannot determine a unique I/O 4421 * command set. Opcode 0h could have any other meaning than something 4422 * equivalent to flushing and say it DOES have completely different 4423 * semantics in some other command set - does an NSID of FFFFFFFFh then 4424 * mean "for all namespaces, apply whatever command set specific command 4425 * that uses the 0h opcode?" Or does it mean "for all namespaces, apply 4426 * whatever command that uses the 0h opcode if, and only if, it allows NSID 4427 * to be FFFFFFFFh"? 4428 * 4429 * Anyway (and luckily), for now, we do not care about this since the 4430 * device only supports namespace types that includes the NVM Flush command 4431 * (NVM and Zoned), so always do an NVM Flush. 4432 */ 4433 if (req->cmd.opcode == NVME_CMD_FLUSH) { 4434 return nvme_flush(n, req); 4435 } 4436 4437 ns = nvme_ns(n, nsid); 4438 if (unlikely(!ns)) { 4439 return NVME_INVALID_FIELD | NVME_DNR; 4440 } 4441 4442 if (!(ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { 4443 trace_pci_nvme_err_invalid_opc(req->cmd.opcode); 4444 return NVME_INVALID_OPCODE | NVME_DNR; 4445 } 4446 4447 if (ns->status) { 4448 return ns->status; 4449 } 4450 4451 if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { 4452 return NVME_INVALID_FIELD; 4453 } 4454 4455 req->ns = ns; 4456 4457 switch (req->cmd.opcode) { 4458 case NVME_CMD_WRITE_ZEROES: 4459 return nvme_write_zeroes(n, req); 4460 case NVME_CMD_ZONE_APPEND: 4461 return nvme_zone_append(n, req); 4462 case NVME_CMD_WRITE: 4463 return nvme_write(n, req); 4464 case NVME_CMD_READ: 4465 return nvme_read(n, req); 4466 case NVME_CMD_COMPARE: 4467 return nvme_compare(n, req); 4468 case NVME_CMD_DSM: 4469 return nvme_dsm(n, req); 4470 case NVME_CMD_VERIFY: 4471 return nvme_verify(n, req); 4472 case NVME_CMD_COPY: 4473 return nvme_copy(n, req); 4474 case NVME_CMD_ZONE_MGMT_SEND: 4475 return nvme_zone_mgmt_send(n, req); 4476 case NVME_CMD_ZONE_MGMT_RECV: 4477 return nvme_zone_mgmt_recv(n, req); 4478 case NVME_CMD_IO_MGMT_RECV: 4479 return nvme_io_mgmt_recv(n, req); 4480 case NVME_CMD_IO_MGMT_SEND: 4481 return nvme_io_mgmt_send(n, req); 4482 default: 4483 assert(false); 4484 } 4485 4486 return NVME_INVALID_OPCODE | NVME_DNR; 4487 } 4488 4489 static void nvme_cq_notifier(EventNotifier *e) 4490 { 4491 NvmeCQueue *cq = container_of(e, NvmeCQueue, notifier); 4492 NvmeCtrl *n = cq->ctrl; 4493 4494 if (!event_notifier_test_and_clear(e)) { 4495 return; 4496 } 4497 4498 nvme_update_cq_head(cq); 4499 4500 if (cq->tail == cq->head) { 4501 if (cq->irq_enabled) { 4502 n->cq_pending--; 4503 } 4504 4505 nvme_irq_deassert(n, cq); 4506 } 4507 4508 qemu_bh_schedule(cq->bh); 4509 } 4510 4511 static int nvme_init_cq_ioeventfd(NvmeCQueue *cq) 4512 { 4513 NvmeCtrl *n = cq->ctrl; 4514 uint16_t offset = (cq->cqid << 3) + (1 << 2); 4515 int ret; 4516 4517 ret = event_notifier_init(&cq->notifier, 0); 4518 if (ret < 0) { 4519 return ret; 4520 } 4521 4522 event_notifier_set_handler(&cq->notifier, nvme_cq_notifier); 4523 memory_region_add_eventfd(&n->iomem, 4524 0x1000 + offset, 4, false, 0, &cq->notifier); 4525 4526 return 0; 4527 } 4528 4529 static void nvme_sq_notifier(EventNotifier *e) 4530 { 4531 NvmeSQueue *sq = container_of(e, NvmeSQueue, notifier); 4532 4533 
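    /*
     * This is the ioeventfd handler for submission queue doorbell writes;
     * bail out if the notifier was not actually signalled before processing
     * the queue below.
     */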
if (!event_notifier_test_and_clear(e)) { 4534 return; 4535 } 4536 4537 nvme_process_sq(sq); 4538 } 4539 4540 static int nvme_init_sq_ioeventfd(NvmeSQueue *sq) 4541 { 4542 NvmeCtrl *n = sq->ctrl; 4543 uint16_t offset = sq->sqid << 3; 4544 int ret; 4545 4546 ret = event_notifier_init(&sq->notifier, 0); 4547 if (ret < 0) { 4548 return ret; 4549 } 4550 4551 event_notifier_set_handler(&sq->notifier, nvme_sq_notifier); 4552 memory_region_add_eventfd(&n->iomem, 4553 0x1000 + offset, 4, false, 0, &sq->notifier); 4554 4555 return 0; 4556 } 4557 4558 static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n) 4559 { 4560 uint16_t offset = sq->sqid << 3; 4561 4562 n->sq[sq->sqid] = NULL; 4563 qemu_bh_delete(sq->bh); 4564 if (sq->ioeventfd_enabled) { 4565 memory_region_del_eventfd(&n->iomem, 4566 0x1000 + offset, 4, false, 0, &sq->notifier); 4567 event_notifier_set_handler(&sq->notifier, NULL); 4568 event_notifier_cleanup(&sq->notifier); 4569 } 4570 g_free(sq->io_req); 4571 if (sq->sqid) { 4572 g_free(sq); 4573 } 4574 } 4575 4576 static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req) 4577 { 4578 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; 4579 NvmeRequest *r, *next; 4580 NvmeSQueue *sq; 4581 NvmeCQueue *cq; 4582 uint16_t qid = le16_to_cpu(c->qid); 4583 4584 if (unlikely(!qid || nvme_check_sqid(n, qid))) { 4585 trace_pci_nvme_err_invalid_del_sq(qid); 4586 return NVME_INVALID_QID | NVME_DNR; 4587 } 4588 4589 trace_pci_nvme_del_sq(qid); 4590 4591 sq = n->sq[qid]; 4592 while (!QTAILQ_EMPTY(&sq->out_req_list)) { 4593 r = QTAILQ_FIRST(&sq->out_req_list); 4594 assert(r->aiocb); 4595 blk_aio_cancel(r->aiocb); 4596 } 4597 4598 assert(QTAILQ_EMPTY(&sq->out_req_list)); 4599 4600 if (!nvme_check_cqid(n, sq->cqid)) { 4601 cq = n->cq[sq->cqid]; 4602 QTAILQ_REMOVE(&cq->sq_list, sq, entry); 4603 4604 nvme_post_cqes(cq); 4605 QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) { 4606 if (r->sq == sq) { 4607 QTAILQ_REMOVE(&cq->req_list, r, entry); 4608 QTAILQ_INSERT_TAIL(&sq->req_list, r, entry); 4609 } 4610 } 4611 } 4612 4613 nvme_free_sq(sq, n); 4614 return NVME_SUCCESS; 4615 } 4616 4617 static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr, 4618 uint16_t sqid, uint16_t cqid, uint16_t size) 4619 { 4620 int i; 4621 NvmeCQueue *cq; 4622 4623 sq->ctrl = n; 4624 sq->dma_addr = dma_addr; 4625 sq->sqid = sqid; 4626 sq->size = size; 4627 sq->cqid = cqid; 4628 sq->head = sq->tail = 0; 4629 sq->io_req = g_new0(NvmeRequest, sq->size); 4630 4631 QTAILQ_INIT(&sq->req_list); 4632 QTAILQ_INIT(&sq->out_req_list); 4633 for (i = 0; i < sq->size; i++) { 4634 sq->io_req[i].sq = sq; 4635 QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); 4636 } 4637 4638 sq->bh = qemu_bh_new_guarded(nvme_process_sq, sq, 4639 &DEVICE(sq->ctrl)->mem_reentrancy_guard); 4640 4641 if (n->dbbuf_enabled) { 4642 sq->db_addr = n->dbbuf_dbs + (sqid << 3); 4643 sq->ei_addr = n->dbbuf_eis + (sqid << 3); 4644 4645 if (n->params.ioeventfd && sq->sqid != 0) { 4646 if (!nvme_init_sq_ioeventfd(sq)) { 4647 sq->ioeventfd_enabled = true; 4648 } 4649 } 4650 } 4651 4652 assert(n->cq[cqid]); 4653 cq = n->cq[cqid]; 4654 QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry); 4655 n->sq[sqid] = sq; 4656 } 4657 4658 static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req) 4659 { 4660 NvmeSQueue *sq; 4661 NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd; 4662 4663 uint16_t cqid = le16_to_cpu(c->cqid); 4664 uint16_t sqid = le16_to_cpu(c->sqid); 4665 uint16_t qsize = le16_to_cpu(c->qsize); 4666 uint16_t qflags = le16_to_cpu(c->sq_flags); 4667 uint64_t prp1 = 
le64_to_cpu(c->prp1); 4668 4669 trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags); 4670 4671 if (unlikely(!cqid || nvme_check_cqid(n, cqid))) { 4672 trace_pci_nvme_err_invalid_create_sq_cqid(cqid); 4673 return NVME_INVALID_CQID | NVME_DNR; 4674 } 4675 if (unlikely(!sqid || sqid > n->conf_ioqpairs || n->sq[sqid] != NULL)) { 4676 trace_pci_nvme_err_invalid_create_sq_sqid(sqid); 4677 return NVME_INVALID_QID | NVME_DNR; 4678 } 4679 if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { 4680 trace_pci_nvme_err_invalid_create_sq_size(qsize); 4681 return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; 4682 } 4683 if (unlikely(prp1 & (n->page_size - 1))) { 4684 trace_pci_nvme_err_invalid_create_sq_addr(prp1); 4685 return NVME_INVALID_PRP_OFFSET | NVME_DNR; 4686 } 4687 if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) { 4688 trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags)); 4689 return NVME_INVALID_FIELD | NVME_DNR; 4690 } 4691 sq = g_malloc0(sizeof(*sq)); 4692 nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1); 4693 return NVME_SUCCESS; 4694 } 4695 4696 struct nvme_stats { 4697 uint64_t units_read; 4698 uint64_t units_written; 4699 uint64_t read_commands; 4700 uint64_t write_commands; 4701 }; 4702 4703 static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats) 4704 { 4705 BlockAcctStats *s = blk_get_stats(ns->blkconf.blk); 4706 4707 stats->units_read += s->nr_bytes[BLOCK_ACCT_READ]; 4708 stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE]; 4709 stats->read_commands += s->nr_ops[BLOCK_ACCT_READ]; 4710 stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE]; 4711 } 4712 4713 static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, 4714 uint64_t off, NvmeRequest *req) 4715 { 4716 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 4717 struct nvme_stats stats = { 0 }; 4718 NvmeSmartLog smart = { 0 }; 4719 uint32_t trans_len; 4720 NvmeNamespace *ns; 4721 time_t current_ms; 4722 uint64_t u_read, u_written; 4723 4724 if (off >= sizeof(smart)) { 4725 return NVME_INVALID_FIELD | NVME_DNR; 4726 } 4727 4728 if (nsid != 0xffffffff) { 4729 ns = nvme_ns(n, nsid); 4730 if (!ns) { 4731 return NVME_INVALID_NSID | NVME_DNR; 4732 } 4733 nvme_set_blk_stats(ns, &stats); 4734 } else { 4735 int i; 4736 4737 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 4738 ns = nvme_ns(n, i); 4739 if (!ns) { 4740 continue; 4741 } 4742 nvme_set_blk_stats(ns, &stats); 4743 } 4744 } 4745 4746 trans_len = MIN(sizeof(smart) - off, buf_len); 4747 smart.critical_warning = n->smart_critical_warning; 4748 4749 u_read = DIV_ROUND_UP(stats.units_read >> BDRV_SECTOR_BITS, 1000); 4750 u_written = DIV_ROUND_UP(stats.units_written >> BDRV_SECTOR_BITS, 1000); 4751 4752 smart.data_units_read[0] = cpu_to_le64(u_read); 4753 smart.data_units_written[0] = cpu_to_le64(u_written); 4754 smart.host_read_commands[0] = cpu_to_le64(stats.read_commands); 4755 smart.host_write_commands[0] = cpu_to_le64(stats.write_commands); 4756 4757 smart.temperature = cpu_to_le16(n->temperature); 4758 4759 if ((n->temperature >= n->features.temp_thresh_hi) || 4760 (n->temperature <= n->features.temp_thresh_low)) { 4761 smart.critical_warning |= NVME_SMART_TEMPERATURE; 4762 } 4763 4764 current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 4765 smart.power_on_hours[0] = 4766 cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60); 4767 4768 if (!rae) { 4769 nvme_clear_events(n, NVME_AER_TYPE_SMART); 4770 } 4771 4772 return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req); 4773 } 4774 4775 static uint16_t 
nvme_endgrp_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                 uint64_t off, NvmeRequest *req)
{
    uint32_t dw11 = le32_to_cpu(req->cmd.cdw11);
    uint16_t endgrpid = (dw11 >> 16) & 0xffff;
    struct nvme_stats stats = {};
    NvmeEndGrpLog info = {};
    int i;

    if (!n->subsys || endgrpid != 0x1) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (off >= sizeof(info)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
        NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i);
        if (!ns) {
            continue;
        }

        nvme_set_blk_stats(ns, &stats);
    }

    info.data_units_read[0] =
        cpu_to_le64(DIV_ROUND_UP(stats.units_read / 1000000000, 1000000000));
    info.data_units_written[0] =
        cpu_to_le64(DIV_ROUND_UP(stats.units_written / 1000000000, 1000000000));
    info.media_units_written[0] =
        cpu_to_le64(DIV_ROUND_UP(stats.units_written / 1000000000, 1000000000));

    info.host_read_commands[0] = cpu_to_le64(stats.read_commands);
    info.host_write_commands[0] = cpu_to_le64(stats.write_commands);

    buf_len = MIN(sizeof(info) - off, buf_len);

    return nvme_c2h(n, (uint8_t *)&info + off, buf_len, req);
}

static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
                                 NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeFwSlotInfoLog fw_log = {
        .afi = 0x1,
    };

    if (off >= sizeof(fw_log)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
    trans_len = MIN(sizeof(fw_log) - off, buf_len);

    return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req);
}

static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeErrorLog errlog;

    if (off >= sizeof(errlog)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_ERROR);
    }

    memset(&errlog, 0x0, sizeof(errlog));
    trans_len = MIN(sizeof(errlog) - off, buf_len);

    return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req);
}

static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                    uint64_t off, NvmeRequest *req)
{
    uint32_t nslist[1024];
    uint32_t trans_len;
    int i = 0;
    uint32_t nsid;

    if (off >= sizeof(nslist)) {
        trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(nslist));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    memset(nslist, 0x0, sizeof(nslist));
    trans_len = MIN(sizeof(nslist) - off, buf_len);

    while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) !=
           NVME_CHANGED_NSID_SIZE) {
        /*
         * If more than 1024 namespaces have changed, the spec requires the
         * first entry in the log page to be set to FFFFFFFFh and the
         * remaining entries to 0.
         */
        if (i == ARRAY_SIZE(nslist)) {
            memset(nslist, 0x0, sizeof(nslist));
            nslist[0] = 0xffffffff;
            break;
        }

        nslist[i++] = nsid;
        clear_bit(nsid, n->changed_nsids);
    }

    /*
     * Clear any remaining changed-namespace bits in case we broke out of the
     * loop above because more than 1024 namespaces have changed.
4890 */ 4891 if (nslist[0] == 0xffffffff) { 4892 bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE); 4893 } 4894 4895 if (!rae) { 4896 nvme_clear_events(n, NVME_AER_TYPE_NOTICE); 4897 } 4898 4899 return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req); 4900 } 4901 4902 static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, 4903 uint64_t off, NvmeRequest *req) 4904 { 4905 NvmeEffectsLog log = {}; 4906 const uint32_t *src_iocs = NULL; 4907 uint32_t trans_len; 4908 4909 if (off >= sizeof(log)) { 4910 trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log)); 4911 return NVME_INVALID_FIELD | NVME_DNR; 4912 } 4913 4914 switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) { 4915 case NVME_CC_CSS_NVM: 4916 src_iocs = nvme_cse_iocs_nvm; 4917 /* fall through */ 4918 case NVME_CC_CSS_ADMIN_ONLY: 4919 break; 4920 case NVME_CC_CSS_CSI: 4921 switch (csi) { 4922 case NVME_CSI_NVM: 4923 src_iocs = nvme_cse_iocs_nvm; 4924 break; 4925 case NVME_CSI_ZONED: 4926 src_iocs = nvme_cse_iocs_zoned; 4927 break; 4928 } 4929 } 4930 4931 memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs)); 4932 4933 if (src_iocs) { 4934 memcpy(log.iocs, src_iocs, sizeof(log.iocs)); 4935 } 4936 4937 trans_len = MIN(sizeof(log) - off, buf_len); 4938 4939 return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req); 4940 } 4941 4942 static size_t sizeof_fdp_conf_descr(size_t nruh, size_t vss) 4943 { 4944 size_t entry_siz = sizeof(NvmeFdpDescrHdr) + nruh * sizeof(NvmeRuhDescr) 4945 + vss; 4946 return ROUND_UP(entry_siz, 8); 4947 } 4948 4949 static uint16_t nvme_fdp_confs(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len, 4950 uint64_t off, NvmeRequest *req) 4951 { 4952 uint32_t log_size, trans_len; 4953 g_autofree uint8_t *buf = NULL; 4954 NvmeFdpDescrHdr *hdr; 4955 NvmeRuhDescr *ruhd; 4956 NvmeEnduranceGroup *endgrp; 4957 NvmeFdpConfsHdr *log; 4958 size_t nruh, fdp_descr_size; 4959 int i; 4960 4961 if (endgrpid != 1 || !n->subsys) { 4962 return NVME_INVALID_FIELD | NVME_DNR; 4963 } 4964 4965 endgrp = &n->subsys->endgrp; 4966 4967 if (endgrp->fdp.enabled) { 4968 nruh = endgrp->fdp.nruh; 4969 } else { 4970 nruh = 1; 4971 } 4972 4973 fdp_descr_size = sizeof_fdp_conf_descr(nruh, FDPVSS); 4974 log_size = sizeof(NvmeFdpConfsHdr) + fdp_descr_size; 4975 4976 if (off >= log_size) { 4977 return NVME_INVALID_FIELD | NVME_DNR; 4978 } 4979 4980 trans_len = MIN(log_size - off, buf_len); 4981 4982 buf = g_malloc0(log_size); 4983 log = (NvmeFdpConfsHdr *)buf; 4984 hdr = (NvmeFdpDescrHdr *)(log + 1); 4985 ruhd = (NvmeRuhDescr *)(buf + sizeof(*log) + sizeof(*hdr)); 4986 4987 log->num_confs = cpu_to_le16(0); 4988 log->size = cpu_to_le32(log_size); 4989 4990 hdr->descr_size = cpu_to_le16(fdp_descr_size); 4991 if (endgrp->fdp.enabled) { 4992 hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, VALID, 1); 4993 hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, RGIF, endgrp->fdp.rgif); 4994 hdr->nrg = cpu_to_le16(endgrp->fdp.nrg); 4995 hdr->nruh = cpu_to_le16(endgrp->fdp.nruh); 4996 hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); 4997 hdr->nnss = cpu_to_le32(NVME_MAX_NAMESPACES); 4998 hdr->runs = cpu_to_le64(endgrp->fdp.runs); 4999 5000 for (i = 0; i < nruh; i++) { 5001 ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; 5002 ruhd++; 5003 } 5004 } else { 5005 /* 1 bit for RUH in PIF -> 2 RUHs max. 
*/ 5006 hdr->nrg = cpu_to_le16(1); 5007 hdr->nruh = cpu_to_le16(1); 5008 hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); 5009 hdr->nnss = cpu_to_le32(1); 5010 hdr->runs = cpu_to_le64(96 * MiB); 5011 5012 ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; 5013 } 5014 5015 return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req); 5016 } 5017 5018 static uint16_t nvme_fdp_ruh_usage(NvmeCtrl *n, uint32_t endgrpid, 5019 uint32_t dw10, uint32_t dw12, 5020 uint32_t buf_len, uint64_t off, 5021 NvmeRequest *req) 5022 { 5023 NvmeRuHandle *ruh; 5024 NvmeRuhuLog *hdr; 5025 NvmeRuhuDescr *ruhud; 5026 NvmeEnduranceGroup *endgrp; 5027 g_autofree uint8_t *buf = NULL; 5028 uint32_t log_size, trans_len; 5029 uint16_t i; 5030 5031 if (endgrpid != 1 || !n->subsys) { 5032 return NVME_INVALID_FIELD | NVME_DNR; 5033 } 5034 5035 endgrp = &n->subsys->endgrp; 5036 5037 if (!endgrp->fdp.enabled) { 5038 return NVME_FDP_DISABLED | NVME_DNR; 5039 } 5040 5041 log_size = sizeof(NvmeRuhuLog) + endgrp->fdp.nruh * sizeof(NvmeRuhuDescr); 5042 5043 if (off >= log_size) { 5044 return NVME_INVALID_FIELD | NVME_DNR; 5045 } 5046 5047 trans_len = MIN(log_size - off, buf_len); 5048 5049 buf = g_malloc0(log_size); 5050 hdr = (NvmeRuhuLog *)buf; 5051 ruhud = (NvmeRuhuDescr *)(hdr + 1); 5052 5053 ruh = endgrp->fdp.ruhs; 5054 hdr->nruh = cpu_to_le16(endgrp->fdp.nruh); 5055 5056 for (i = 0; i < endgrp->fdp.nruh; i++, ruhud++, ruh++) { 5057 ruhud->ruha = ruh->ruha; 5058 } 5059 5060 return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req); 5061 } 5062 5063 static uint16_t nvme_fdp_stats(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len, 5064 uint64_t off, NvmeRequest *req) 5065 { 5066 NvmeEnduranceGroup *endgrp; 5067 NvmeFdpStatsLog log = {}; 5068 uint32_t trans_len; 5069 5070 if (off >= sizeof(NvmeFdpStatsLog)) { 5071 return NVME_INVALID_FIELD | NVME_DNR; 5072 } 5073 5074 if (endgrpid != 1 || !n->subsys) { 5075 return NVME_INVALID_FIELD | NVME_DNR; 5076 } 5077 5078 if (!n->subsys->endgrp.fdp.enabled) { 5079 return NVME_FDP_DISABLED | NVME_DNR; 5080 } 5081 5082 endgrp = &n->subsys->endgrp; 5083 5084 trans_len = MIN(sizeof(log) - off, buf_len); 5085 5086 /* spec value is 128 bit, we only use 64 bit */ 5087 log.hbmw[0] = cpu_to_le64(endgrp->fdp.hbmw); 5088 log.mbmw[0] = cpu_to_le64(endgrp->fdp.mbmw); 5089 log.mbe[0] = cpu_to_le64(endgrp->fdp.mbe); 5090 5091 return nvme_c2h(n, (uint8_t *)&log + off, trans_len, req); 5092 } 5093 5094 static uint16_t nvme_fdp_events(NvmeCtrl *n, uint32_t endgrpid, 5095 uint32_t buf_len, uint64_t off, 5096 NvmeRequest *req) 5097 { 5098 NvmeEnduranceGroup *endgrp; 5099 NvmeCmd *cmd = &req->cmd; 5100 bool host_events = (cmd->cdw10 >> 8) & 0x1; 5101 uint32_t log_size, trans_len; 5102 NvmeFdpEventBuffer *ebuf; 5103 g_autofree NvmeFdpEventsLog *elog = NULL; 5104 NvmeFdpEvent *event; 5105 5106 if (endgrpid != 1 || !n->subsys) { 5107 return NVME_INVALID_FIELD | NVME_DNR; 5108 } 5109 5110 endgrp = &n->subsys->endgrp; 5111 5112 if (!endgrp->fdp.enabled) { 5113 return NVME_FDP_DISABLED | NVME_DNR; 5114 } 5115 5116 if (host_events) { 5117 ebuf = &endgrp->fdp.host_events; 5118 } else { 5119 ebuf = &endgrp->fdp.ctrl_events; 5120 } 5121 5122 log_size = sizeof(NvmeFdpEventsLog) + ebuf->nelems * sizeof(NvmeFdpEvent); 5123 trans_len = MIN(log_size - off, buf_len); 5124 elog = g_malloc0(log_size); 5125 elog->num_events = cpu_to_le32(ebuf->nelems); 5126 event = (NvmeFdpEvent *)(elog + 1); 5127 5128 if (ebuf->nelems && ebuf->start == ebuf->next) { 5129 unsigned int nelems = (NVME_FDP_MAX_EVENTS - ebuf->start); 5130 /* wrap over, copy 
[start;NVME_FDP_MAX_EVENTS[ and [0; next[ */ 5131 memcpy(event, &ebuf->events[ebuf->start], 5132 sizeof(NvmeFdpEvent) * nelems); 5133 memcpy(event + nelems, ebuf->events, 5134 sizeof(NvmeFdpEvent) * ebuf->next); 5135 } else if (ebuf->start < ebuf->next) { 5136 memcpy(event, &ebuf->events[ebuf->start], 5137 sizeof(NvmeFdpEvent) * (ebuf->next - ebuf->start)); 5138 } 5139 5140 return nvme_c2h(n, (uint8_t *)elog + off, trans_len, req); 5141 } 5142 5143 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) 5144 { 5145 NvmeCmd *cmd = &req->cmd; 5146 5147 uint32_t dw10 = le32_to_cpu(cmd->cdw10); 5148 uint32_t dw11 = le32_to_cpu(cmd->cdw11); 5149 uint32_t dw12 = le32_to_cpu(cmd->cdw12); 5150 uint32_t dw13 = le32_to_cpu(cmd->cdw13); 5151 uint8_t lid = dw10 & 0xff; 5152 uint8_t lsp = (dw10 >> 8) & 0xf; 5153 uint8_t rae = (dw10 >> 15) & 0x1; 5154 uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24; 5155 uint32_t numdl, numdu, lspi; 5156 uint64_t off, lpol, lpou; 5157 size_t len; 5158 uint16_t status; 5159 5160 numdl = (dw10 >> 16); 5161 numdu = (dw11 & 0xffff); 5162 lspi = (dw11 >> 16); 5163 lpol = dw12; 5164 lpou = dw13; 5165 5166 len = (((numdu << 16) | numdl) + 1) << 2; 5167 off = (lpou << 32ULL) | lpol; 5168 5169 if (off & 0x3) { 5170 return NVME_INVALID_FIELD | NVME_DNR; 5171 } 5172 5173 trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off); 5174 5175 status = nvme_check_mdts(n, len); 5176 if (status) { 5177 return status; 5178 } 5179 5180 switch (lid) { 5181 case NVME_LOG_ERROR_INFO: 5182 return nvme_error_info(n, rae, len, off, req); 5183 case NVME_LOG_SMART_INFO: 5184 return nvme_smart_info(n, rae, len, off, req); 5185 case NVME_LOG_FW_SLOT_INFO: 5186 return nvme_fw_log_info(n, len, off, req); 5187 case NVME_LOG_CHANGED_NSLIST: 5188 return nvme_changed_nslist(n, rae, len, off, req); 5189 case NVME_LOG_CMD_EFFECTS: 5190 return nvme_cmd_effects(n, csi, len, off, req); 5191 case NVME_LOG_ENDGRP: 5192 return nvme_endgrp_info(n, rae, len, off, req); 5193 case NVME_LOG_FDP_CONFS: 5194 return nvme_fdp_confs(n, lspi, len, off, req); 5195 case NVME_LOG_FDP_RUH_USAGE: 5196 return nvme_fdp_ruh_usage(n, lspi, dw10, dw12, len, off, req); 5197 case NVME_LOG_FDP_STATS: 5198 return nvme_fdp_stats(n, lspi, len, off, req); 5199 case NVME_LOG_FDP_EVENTS: 5200 return nvme_fdp_events(n, lspi, len, off, req); 5201 default: 5202 trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid); 5203 return NVME_INVALID_FIELD | NVME_DNR; 5204 } 5205 } 5206 5207 static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n) 5208 { 5209 PCIDevice *pci = PCI_DEVICE(n); 5210 uint16_t offset = (cq->cqid << 3) + (1 << 2); 5211 5212 n->cq[cq->cqid] = NULL; 5213 qemu_bh_delete(cq->bh); 5214 if (cq->ioeventfd_enabled) { 5215 memory_region_del_eventfd(&n->iomem, 5216 0x1000 + offset, 4, false, 0, &cq->notifier); 5217 event_notifier_set_handler(&cq->notifier, NULL); 5218 event_notifier_cleanup(&cq->notifier); 5219 } 5220 if (msix_enabled(pci)) { 5221 msix_vector_unuse(pci, cq->vector); 5222 } 5223 if (cq->cqid) { 5224 g_free(cq); 5225 } 5226 } 5227 5228 static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req) 5229 { 5230 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; 5231 NvmeCQueue *cq; 5232 uint16_t qid = le16_to_cpu(c->qid); 5233 5234 if (unlikely(!qid || nvme_check_cqid(n, qid))) { 5235 trace_pci_nvme_err_invalid_del_cq_cqid(qid); 5236 return NVME_INVALID_CQID | NVME_DNR; 5237 } 5238 5239 cq = n->cq[qid]; 5240 if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) { 5241 trace_pci_nvme_err_invalid_del_cq_notempty(qid); 5242 return 
NVME_INVALID_QUEUE_DEL; 5243 } 5244 5245 if (cq->irq_enabled && cq->tail != cq->head) { 5246 n->cq_pending--; 5247 } 5248 5249 nvme_irq_deassert(n, cq); 5250 trace_pci_nvme_del_cq(qid); 5251 nvme_free_cq(cq, n); 5252 return NVME_SUCCESS; 5253 } 5254 5255 static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, 5256 uint16_t cqid, uint16_t vector, uint16_t size, 5257 uint16_t irq_enabled) 5258 { 5259 PCIDevice *pci = PCI_DEVICE(n); 5260 5261 if (msix_enabled(pci)) { 5262 msix_vector_use(pci, vector); 5263 } 5264 cq->ctrl = n; 5265 cq->cqid = cqid; 5266 cq->size = size; 5267 cq->dma_addr = dma_addr; 5268 cq->phase = 1; 5269 cq->irq_enabled = irq_enabled; 5270 cq->vector = vector; 5271 cq->head = cq->tail = 0; 5272 QTAILQ_INIT(&cq->req_list); 5273 QTAILQ_INIT(&cq->sq_list); 5274 if (n->dbbuf_enabled) { 5275 cq->db_addr = n->dbbuf_dbs + (cqid << 3) + (1 << 2); 5276 cq->ei_addr = n->dbbuf_eis + (cqid << 3) + (1 << 2); 5277 5278 if (n->params.ioeventfd && cqid != 0) { 5279 if (!nvme_init_cq_ioeventfd(cq)) { 5280 cq->ioeventfd_enabled = true; 5281 } 5282 } 5283 } 5284 n->cq[cqid] = cq; 5285 cq->bh = qemu_bh_new_guarded(nvme_post_cqes, cq, 5286 &DEVICE(cq->ctrl)->mem_reentrancy_guard); 5287 } 5288 5289 static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) 5290 { 5291 NvmeCQueue *cq; 5292 NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd; 5293 uint16_t cqid = le16_to_cpu(c->cqid); 5294 uint16_t vector = le16_to_cpu(c->irq_vector); 5295 uint16_t qsize = le16_to_cpu(c->qsize); 5296 uint16_t qflags = le16_to_cpu(c->cq_flags); 5297 uint64_t prp1 = le64_to_cpu(c->prp1); 5298 5299 trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags, 5300 NVME_CQ_FLAGS_IEN(qflags) != 0); 5301 5302 if (unlikely(!cqid || cqid > n->conf_ioqpairs || n->cq[cqid] != NULL)) { 5303 trace_pci_nvme_err_invalid_create_cq_cqid(cqid); 5304 return NVME_INVALID_QID | NVME_DNR; 5305 } 5306 if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { 5307 trace_pci_nvme_err_invalid_create_cq_size(qsize); 5308 return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; 5309 } 5310 if (unlikely(prp1 & (n->page_size - 1))) { 5311 trace_pci_nvme_err_invalid_create_cq_addr(prp1); 5312 return NVME_INVALID_PRP_OFFSET | NVME_DNR; 5313 } 5314 if (unlikely(!msix_enabled(PCI_DEVICE(n)) && vector)) { 5315 trace_pci_nvme_err_invalid_create_cq_vector(vector); 5316 return NVME_INVALID_IRQ_VECTOR | NVME_DNR; 5317 } 5318 if (unlikely(vector >= n->conf_msix_qsize)) { 5319 trace_pci_nvme_err_invalid_create_cq_vector(vector); 5320 return NVME_INVALID_IRQ_VECTOR | NVME_DNR; 5321 } 5322 if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) { 5323 trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags)); 5324 return NVME_INVALID_FIELD | NVME_DNR; 5325 } 5326 5327 cq = g_malloc0(sizeof(*cq)); 5328 nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1, 5329 NVME_CQ_FLAGS_IEN(qflags)); 5330 5331 /* 5332 * It is only required to set qs_created when creating a completion queue; 5333 * creating a submission queue without a matching completion queue will 5334 * fail. 
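     * (nvme_create_sq() rejects a CQID that does not refer to an existing
     * completion queue, so setting the flag here is sufficient.)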
5335 */ 5336 n->qs_created = true; 5337 return NVME_SUCCESS; 5338 } 5339 5340 static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req) 5341 { 5342 uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {}; 5343 5344 return nvme_c2h(n, id, sizeof(id), req); 5345 } 5346 5347 static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req) 5348 { 5349 trace_pci_nvme_identify_ctrl(); 5350 5351 return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req); 5352 } 5353 5354 static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req) 5355 { 5356 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5357 uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {}; 5358 NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id; 5359 5360 trace_pci_nvme_identify_ctrl_csi(c->csi); 5361 5362 switch (c->csi) { 5363 case NVME_CSI_NVM: 5364 id_nvm->vsl = n->params.vsl; 5365 id_nvm->dmrsl = cpu_to_le32(n->dmrsl); 5366 break; 5367 5368 case NVME_CSI_ZONED: 5369 ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl; 5370 break; 5371 5372 default: 5373 return NVME_INVALID_FIELD | NVME_DNR; 5374 } 5375 5376 return nvme_c2h(n, id, sizeof(id), req); 5377 } 5378 5379 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active) 5380 { 5381 NvmeNamespace *ns; 5382 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5383 uint32_t nsid = le32_to_cpu(c->nsid); 5384 5385 trace_pci_nvme_identify_ns(nsid); 5386 5387 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { 5388 return NVME_INVALID_NSID | NVME_DNR; 5389 } 5390 5391 ns = nvme_ns(n, nsid); 5392 if (unlikely(!ns)) { 5393 if (!active) { 5394 ns = nvme_subsys_ns(n->subsys, nsid); 5395 if (!ns) { 5396 return nvme_rpt_empty_id_struct(n, req); 5397 } 5398 } else { 5399 return nvme_rpt_empty_id_struct(n, req); 5400 } 5401 } 5402 5403 if (active || ns->csi == NVME_CSI_NVM) { 5404 return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req); 5405 } 5406 5407 return NVME_INVALID_CMD_SET | NVME_DNR; 5408 } 5409 5410 static uint16_t nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req, 5411 bool attached) 5412 { 5413 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5414 uint32_t nsid = le32_to_cpu(c->nsid); 5415 uint16_t min_id = le16_to_cpu(c->ctrlid); 5416 uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {}; 5417 uint16_t *ids = &list[1]; 5418 NvmeNamespace *ns; 5419 NvmeCtrl *ctrl; 5420 int cntlid, nr_ids = 0; 5421 5422 trace_pci_nvme_identify_ctrl_list(c->cns, min_id); 5423 5424 if (!n->subsys) { 5425 return NVME_INVALID_FIELD | NVME_DNR; 5426 } 5427 5428 if (attached) { 5429 if (nsid == NVME_NSID_BROADCAST) { 5430 return NVME_INVALID_FIELD | NVME_DNR; 5431 } 5432 5433 ns = nvme_subsys_ns(n->subsys, nsid); 5434 if (!ns) { 5435 return NVME_INVALID_FIELD | NVME_DNR; 5436 } 5437 } 5438 5439 for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) { 5440 ctrl = nvme_subsys_ctrl(n->subsys, cntlid); 5441 if (!ctrl) { 5442 continue; 5443 } 5444 5445 if (attached && !nvme_ns(ctrl, nsid)) { 5446 continue; 5447 } 5448 5449 ids[nr_ids++] = cntlid; 5450 } 5451 5452 list[0] = nr_ids; 5453 5454 return nvme_c2h(n, (uint8_t *)list, sizeof(list), req); 5455 } 5456 5457 static uint16_t nvme_identify_pri_ctrl_cap(NvmeCtrl *n, NvmeRequest *req) 5458 { 5459 trace_pci_nvme_identify_pri_ctrl_cap(le16_to_cpu(n->pri_ctrl_cap.cntlid)); 5460 5461 return nvme_c2h(n, (uint8_t *)&n->pri_ctrl_cap, 5462 sizeof(NvmePriCtrlCap), req); 5463 } 5464 5465 static uint16_t nvme_identify_sec_ctrl_list(NvmeCtrl *n, NvmeRequest *req) 5466 { 5467 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5468 uint16_t 
pri_ctrl_id = le16_to_cpu(n->pri_ctrl_cap.cntlid); 5469 uint16_t min_id = le16_to_cpu(c->ctrlid); 5470 uint8_t num_sec_ctrl = n->sec_ctrl_list.numcntl; 5471 NvmeSecCtrlList list = {0}; 5472 uint8_t i; 5473 5474 for (i = 0; i < num_sec_ctrl; i++) { 5475 if (n->sec_ctrl_list.sec[i].scid >= min_id) { 5476 list.numcntl = num_sec_ctrl - i; 5477 memcpy(&list.sec, n->sec_ctrl_list.sec + i, 5478 list.numcntl * sizeof(NvmeSecCtrlEntry)); 5479 break; 5480 } 5481 } 5482 5483 trace_pci_nvme_identify_sec_ctrl_list(pri_ctrl_id, list.numcntl); 5484 5485 return nvme_c2h(n, (uint8_t *)&list, sizeof(list), req); 5486 } 5487 5488 static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req, 5489 bool active) 5490 { 5491 NvmeNamespace *ns; 5492 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5493 uint32_t nsid = le32_to_cpu(c->nsid); 5494 5495 trace_pci_nvme_identify_ns_csi(nsid, c->csi); 5496 5497 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { 5498 return NVME_INVALID_NSID | NVME_DNR; 5499 } 5500 5501 ns = nvme_ns(n, nsid); 5502 if (unlikely(!ns)) { 5503 if (!active) { 5504 ns = nvme_subsys_ns(n->subsys, nsid); 5505 if (!ns) { 5506 return nvme_rpt_empty_id_struct(n, req); 5507 } 5508 } else { 5509 return nvme_rpt_empty_id_struct(n, req); 5510 } 5511 } 5512 5513 if (c->csi == NVME_CSI_NVM) { 5514 return nvme_c2h(n, (uint8_t *)&ns->id_ns_nvm, sizeof(NvmeIdNsNvm), 5515 req); 5516 } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) { 5517 return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned), 5518 req); 5519 } 5520 5521 return NVME_INVALID_FIELD | NVME_DNR; 5522 } 5523 5524 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req, 5525 bool active) 5526 { 5527 NvmeNamespace *ns; 5528 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5529 uint32_t min_nsid = le32_to_cpu(c->nsid); 5530 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; 5531 static const int data_len = sizeof(list); 5532 uint32_t *list_ptr = (uint32_t *)list; 5533 int i, j = 0; 5534 5535 trace_pci_nvme_identify_nslist(min_nsid); 5536 5537 /* 5538 * Both FFFFFFFFh (NVME_NSID_BROADCAST) and FFFFFFFFEh are invalid values 5539 * since the Active Namespace ID List should return namespaces with ids 5540 * *higher* than the NSID specified in the command. This is also specified 5541 * in the spec (NVM Express v1.3d, Section 5.15.4). 5542 */ 5543 if (min_nsid >= NVME_NSID_BROADCAST - 1) { 5544 return NVME_INVALID_NSID | NVME_DNR; 5545 } 5546 5547 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 5548 ns = nvme_ns(n, i); 5549 if (!ns) { 5550 if (!active) { 5551 ns = nvme_subsys_ns(n->subsys, i); 5552 if (!ns) { 5553 continue; 5554 } 5555 } else { 5556 continue; 5557 } 5558 } 5559 if (ns->params.nsid <= min_nsid) { 5560 continue; 5561 } 5562 list_ptr[j++] = cpu_to_le32(ns->params.nsid); 5563 if (j == data_len / sizeof(uint32_t)) { 5564 break; 5565 } 5566 } 5567 5568 return nvme_c2h(n, list, data_len, req); 5569 } 5570 5571 static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req, 5572 bool active) 5573 { 5574 NvmeNamespace *ns; 5575 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5576 uint32_t min_nsid = le32_to_cpu(c->nsid); 5577 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; 5578 static const int data_len = sizeof(list); 5579 uint32_t *list_ptr = (uint32_t *)list; 5580 int i, j = 0; 5581 5582 trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi); 5583 5584 /* 5585 * Same as in nvme_identify_nslist(), FFFFFFFFh/FFFFFFFFEh are invalid. 
5586 */ 5587 if (min_nsid >= NVME_NSID_BROADCAST - 1) { 5588 return NVME_INVALID_NSID | NVME_DNR; 5589 } 5590 5591 if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) { 5592 return NVME_INVALID_FIELD | NVME_DNR; 5593 } 5594 5595 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 5596 ns = nvme_ns(n, i); 5597 if (!ns) { 5598 if (!active) { 5599 ns = nvme_subsys_ns(n->subsys, i); 5600 if (!ns) { 5601 continue; 5602 } 5603 } else { 5604 continue; 5605 } 5606 } 5607 if (ns->params.nsid <= min_nsid || c->csi != ns->csi) { 5608 continue; 5609 } 5610 list_ptr[j++] = cpu_to_le32(ns->params.nsid); 5611 if (j == data_len / sizeof(uint32_t)) { 5612 break; 5613 } 5614 } 5615 5616 return nvme_c2h(n, list, data_len, req); 5617 } 5618 5619 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) 5620 { 5621 NvmeNamespace *ns; 5622 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5623 uint32_t nsid = le32_to_cpu(c->nsid); 5624 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; 5625 uint8_t *pos = list; 5626 struct { 5627 NvmeIdNsDescr hdr; 5628 uint8_t v[NVME_NIDL_UUID]; 5629 } QEMU_PACKED uuid = {}; 5630 struct { 5631 NvmeIdNsDescr hdr; 5632 uint64_t v; 5633 } QEMU_PACKED eui64 = {}; 5634 struct { 5635 NvmeIdNsDescr hdr; 5636 uint8_t v; 5637 } QEMU_PACKED csi = {}; 5638 5639 trace_pci_nvme_identify_ns_descr_list(nsid); 5640 5641 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { 5642 return NVME_INVALID_NSID | NVME_DNR; 5643 } 5644 5645 ns = nvme_ns(n, nsid); 5646 if (unlikely(!ns)) { 5647 return NVME_INVALID_FIELD | NVME_DNR; 5648 } 5649 5650 if (!qemu_uuid_is_null(&ns->params.uuid)) { 5651 uuid.hdr.nidt = NVME_NIDT_UUID; 5652 uuid.hdr.nidl = NVME_NIDL_UUID; 5653 memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID); 5654 memcpy(pos, &uuid, sizeof(uuid)); 5655 pos += sizeof(uuid); 5656 } 5657 5658 if (ns->params.eui64) { 5659 eui64.hdr.nidt = NVME_NIDT_EUI64; 5660 eui64.hdr.nidl = NVME_NIDL_EUI64; 5661 eui64.v = cpu_to_be64(ns->params.eui64); 5662 memcpy(pos, &eui64, sizeof(eui64)); 5663 pos += sizeof(eui64); 5664 } 5665 5666 csi.hdr.nidt = NVME_NIDT_CSI; 5667 csi.hdr.nidl = NVME_NIDL_CSI; 5668 csi.v = ns->csi; 5669 memcpy(pos, &csi, sizeof(csi)); 5670 pos += sizeof(csi); 5671 5672 return nvme_c2h(n, list, sizeof(list), req); 5673 } 5674 5675 static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req) 5676 { 5677 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; 5678 static const int data_len = sizeof(list); 5679 5680 trace_pci_nvme_identify_cmd_set(); 5681 5682 NVME_SET_CSI(*list, NVME_CSI_NVM); 5683 NVME_SET_CSI(*list, NVME_CSI_ZONED); 5684 5685 return nvme_c2h(n, list, data_len, req); 5686 } 5687 5688 static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req) 5689 { 5690 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; 5691 5692 trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid), 5693 c->csi); 5694 5695 switch (c->cns) { 5696 case NVME_ID_CNS_NS: 5697 return nvme_identify_ns(n, req, true); 5698 case NVME_ID_CNS_NS_PRESENT: 5699 return nvme_identify_ns(n, req, false); 5700 case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST: 5701 return nvme_identify_ctrl_list(n, req, true); 5702 case NVME_ID_CNS_CTRL_LIST: 5703 return nvme_identify_ctrl_list(n, req, false); 5704 case NVME_ID_CNS_PRIMARY_CTRL_CAP: 5705 return nvme_identify_pri_ctrl_cap(n, req); 5706 case NVME_ID_CNS_SECONDARY_CTRL_LIST: 5707 return nvme_identify_sec_ctrl_list(n, req); 5708 case NVME_ID_CNS_CS_NS: 5709 return nvme_identify_ns_csi(n, req, true); 5710 case NVME_ID_CNS_CS_NS_PRESENT: 5711 return 
nvme_identify_ns_csi(n, req, false); 5712 case NVME_ID_CNS_CTRL: 5713 return nvme_identify_ctrl(n, req); 5714 case NVME_ID_CNS_CS_CTRL: 5715 return nvme_identify_ctrl_csi(n, req); 5716 case NVME_ID_CNS_NS_ACTIVE_LIST: 5717 return nvme_identify_nslist(n, req, true); 5718 case NVME_ID_CNS_NS_PRESENT_LIST: 5719 return nvme_identify_nslist(n, req, false); 5720 case NVME_ID_CNS_CS_NS_ACTIVE_LIST: 5721 return nvme_identify_nslist_csi(n, req, true); 5722 case NVME_ID_CNS_CS_NS_PRESENT_LIST: 5723 return nvme_identify_nslist_csi(n, req, false); 5724 case NVME_ID_CNS_NS_DESCR_LIST: 5725 return nvme_identify_ns_descr_list(n, req); 5726 case NVME_ID_CNS_IO_COMMAND_SET: 5727 return nvme_identify_cmd_set(n, req); 5728 default: 5729 trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns)); 5730 return NVME_INVALID_FIELD | NVME_DNR; 5731 } 5732 } 5733 5734 static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req) 5735 { 5736 uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff; 5737 5738 req->cqe.result = 1; 5739 if (nvme_check_sqid(n, sqid)) { 5740 return NVME_INVALID_FIELD | NVME_DNR; 5741 } 5742 5743 return NVME_SUCCESS; 5744 } 5745 5746 static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts) 5747 { 5748 trace_pci_nvme_setfeat_timestamp(ts); 5749 5750 n->host_timestamp = le64_to_cpu(ts); 5751 n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 5752 } 5753 5754 static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n) 5755 { 5756 uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 5757 uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms; 5758 5759 union nvme_timestamp { 5760 struct { 5761 uint64_t timestamp:48; 5762 uint64_t sync:1; 5763 uint64_t origin:3; 5764 uint64_t rsvd1:12; 5765 }; 5766 uint64_t all; 5767 }; 5768 5769 union nvme_timestamp ts; 5770 ts.all = 0; 5771 ts.timestamp = n->host_timestamp + elapsed_time; 5772 5773 /* If the host timestamp is non-zero, set the timestamp origin */ 5774 ts.origin = n->host_timestamp ? 
        0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}

static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
}

static int nvme_get_feature_fdp(NvmeCtrl *n, uint32_t endgrpid,
                                uint32_t *result)
{
    *result = 0;

    if (!n->subsys || !n->subsys->endgrp.fdp.enabled) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    *result = FIELD_DP16(0, FEAT_FDP, FDPE, 1);
    *result = FIELD_DP16(*result, FEAT_FDP, CONF_NDX, 0);

    return NVME_SUCCESS;
}

static uint16_t nvme_get_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns,
                                            NvmeRequest *req, uint32_t *result)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t cdw11 = le32_to_cpu(cmd->cdw11);
    uint16_t ph = cdw11 & 0xffff;
    uint8_t noet = (cdw11 >> 16) & 0xff;
    uint16_t ruhid, ret;
    uint32_t nentries = 0;
    uint8_t s_events_ndx = 0;
    size_t s_events_siz = sizeof(NvmeFdpEventDescr) * noet;
    g_autofree NvmeFdpEventDescr *s_events = g_malloc0(s_events_siz);
    NvmeRuHandle *ruh;
    NvmeFdpEventDescr *s_event;

    if (!n->subsys || !n->subsys->endgrp.fdp.enabled) {
        return NVME_FDP_DISABLED | NVME_DNR;
    }

    if (!nvme_ph_valid(ns, ph)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    ruhid = ns->fdp.phs[ph];
    ruh = &n->subsys->endgrp.fdp.ruhs[ruhid];

    assert(ruh);

    if (unlikely(noet == 0)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    for (uint8_t event_type = 0; event_type < FDP_EVT_MAX; event_type++) {
        uint8_t shift = nvme_fdp_evf_shifts[event_type];
        if (!shift && event_type) {
            /*
             * Only the first entry (event_type == 0) has a shift value of 0;
             * other entries with a zero shift are simply unpopulated.
             */
            continue;
        }

        nentries++;

        s_event = &s_events[s_events_ndx];
        s_event->evt = event_type;
        s_event->evta = (ruh->event_filter >> shift) & 0x1;

        /* break if all `noet` entries are filled */
        if ((++s_events_ndx) == noet) {
            break;
        }
    }

    ret = nvme_c2h(n, s_events, s_events_siz, req);
    if (ret) {
        return ret;
    }

    *result = nentries;
    return NVME_SUCCESS;
}

static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint32_t result;
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
    uint16_t iv;
    NvmeNamespace *ns;
    int i;
    uint16_t endgrpid = 0, ret = NVME_SUCCESS;

    static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
        [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
    };

    trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11);

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
            /*
             * The Reservation Notification Mask and Reservation Persistence
             * features require a status code of Invalid Field in Command when
             * NSID is FFFFFFFFh.
             * Since the device does not support those features, we can always
             * return Invalid Namespace or Format, as we should do for all
             * other features.
             */
            return NVME_INVALID_NSID | NVME_DNR;
        }

        if (!nvme_ns(n, nsid)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    switch (sel) {
    case NVME_GETFEAT_SELECT_CURRENT:
        break;
    case NVME_GETFEAT_SELECT_SAVED:
        /* no features are saveable by the controller; fallthrough */
    case NVME_GETFEAT_SELECT_DEFAULT:
        goto defaults;
    case NVME_GETFEAT_SELECT_CAP:
        result = nvme_feature_cap[fid];
        goto out;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        /*
         * The controller only implements the Composite Temperature sensor, so
         * return 0 for all other sensors.
         */
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            goto out;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            result = n->features.temp_thresh_hi;
            goto out;
        case NVME_TEMP_THSEL_UNDER:
            result = n->features.temp_thresh_low;
            goto out;
        }

        return NVME_INVALID_FIELD | NVME_DNR;
    case NVME_ERROR_RECOVERY:
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        ns = nvme_ns(n, nsid);
        if (unlikely(!ns)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = ns->features.err_rec;
        goto out;
    case NVME_VOLATILE_WRITE_CACHE:
        result = 0;
        for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            result = blk_enable_write_cache(ns->blkconf.blk);
            if (result) {
                break;
            }
        }
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        goto out;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        result = n->features.async_config;
        goto out;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, req);
    case NVME_HOST_BEHAVIOR_SUPPORT:
        return nvme_c2h(n, (uint8_t *)&n->features.hbs,
                        sizeof(n->features.hbs), req);
    case NVME_FDP_MODE:
        endgrpid = dw11 & 0xff;

        if (endgrpid != 0x1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        ret = nvme_get_feature_fdp(n, endgrpid, &result);
        if (ret) {
            return ret;
        }
        goto out;
    case NVME_FDP_EVENTS:
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        ns = nvme_ns(n, nsid);
        if (unlikely(!ns)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        ret = nvme_get_feature_fdp_events(n, ns, req, &result);
        if (ret) {
            return ret;
        }
        goto out;
    default:
        break;
    }

defaults:
    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
            result = NVME_TEMPERATURE_WARNING;
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        result = (n->conf_ioqpairs - 1) | ((n->conf_ioqpairs - 1) << 16);
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_INTERRUPT_VECTOR_CONF:
        iv = dw11 & 0xffff;
        if (iv >= n->conf_ioqpairs + 1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = iv;
        if (iv == n->admin_cq.vector) {
            result |= NVME_INTVC_NOCOALESCING;
        }
        break;
    case NVME_FDP_MODE:
        endgrpid = dw11 & 0xff;

        if (endgrpid != 0x1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        ret = nvme_get_feature_fdp(n, endgrpid, &result);
        if (ret) {
            return ret;
        }
        goto out;

        break;
    default:
        result = nvme_feature_default[fid];
        break;
    }

out:
    req->cqe.result = cpu_to_le32(result);
    return ret;
}

static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t ret;
    uint64_t timestamp;

    ret = nvme_h2c(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
    if (ret) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns,
                                            NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t cdw11 = le32_to_cpu(cmd->cdw11);
    uint16_t ph = cdw11 & 0xffff;
    uint8_t noet = (cdw11 >> 16) & 0xff;
    uint16_t ret, ruhid;
    uint8_t enable = le32_to_cpu(cmd->cdw12) & 0x1;
    uint8_t event_mask = 0;
    unsigned int i;
    g_autofree uint8_t *events = g_malloc0(noet);
    NvmeRuHandle *ruh = NULL;

    assert(ns);

    if (!n->subsys || !n->subsys->endgrp.fdp.enabled) {
        return NVME_FDP_DISABLED | NVME_DNR;
    }

    if (!nvme_ph_valid(ns, ph)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    ruhid = ns->fdp.phs[ph];
    ruh = &n->subsys->endgrp.fdp.ruhs[ruhid];

    ret = nvme_h2c(n, events, noet, req);
    if (ret) {
        return ret;
    }

    for (i = 0; i < noet; i++) {
        event_mask |= (1 << nvme_fdp_evf_shifts[events[i]]);
    }

    if (enable) {
        ruh->event_filter |= event_mask;
    } else {
        ruh->event_filter =
ruh->event_filter & ~event_mask; 6115 } 6116 6117 return NVME_SUCCESS; 6118 } 6119 6120 static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) 6121 { 6122 NvmeNamespace *ns = NULL; 6123 6124 NvmeCmd *cmd = &req->cmd; 6125 uint32_t dw10 = le32_to_cpu(cmd->cdw10); 6126 uint32_t dw11 = le32_to_cpu(cmd->cdw11); 6127 uint32_t nsid = le32_to_cpu(cmd->nsid); 6128 uint8_t fid = NVME_GETSETFEAT_FID(dw10); 6129 uint8_t save = NVME_SETFEAT_SAVE(dw10); 6130 uint16_t status; 6131 int i; 6132 6133 trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11); 6134 6135 if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) { 6136 return NVME_FID_NOT_SAVEABLE | NVME_DNR; 6137 } 6138 6139 if (!nvme_feature_support[fid]) { 6140 return NVME_INVALID_FIELD | NVME_DNR; 6141 } 6142 6143 if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { 6144 if (nsid != NVME_NSID_BROADCAST) { 6145 if (!nvme_nsid_valid(n, nsid)) { 6146 return NVME_INVALID_NSID | NVME_DNR; 6147 } 6148 6149 ns = nvme_ns(n, nsid); 6150 if (unlikely(!ns)) { 6151 return NVME_INVALID_FIELD | NVME_DNR; 6152 } 6153 } 6154 } else if (nsid && nsid != NVME_NSID_BROADCAST) { 6155 if (!nvme_nsid_valid(n, nsid)) { 6156 return NVME_INVALID_NSID | NVME_DNR; 6157 } 6158 6159 return NVME_FEAT_NOT_NS_SPEC | NVME_DNR; 6160 } 6161 6162 if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) { 6163 return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; 6164 } 6165 6166 switch (fid) { 6167 case NVME_TEMPERATURE_THRESHOLD: 6168 if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { 6169 break; 6170 } 6171 6172 switch (NVME_TEMP_THSEL(dw11)) { 6173 case NVME_TEMP_THSEL_OVER: 6174 n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11); 6175 break; 6176 case NVME_TEMP_THSEL_UNDER: 6177 n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11); 6178 break; 6179 default: 6180 return NVME_INVALID_FIELD | NVME_DNR; 6181 } 6182 6183 if ((n->temperature >= n->features.temp_thresh_hi) || 6184 (n->temperature <= n->features.temp_thresh_low)) { 6185 nvme_smart_event(n, NVME_SMART_TEMPERATURE); 6186 } 6187 6188 break; 6189 case NVME_ERROR_RECOVERY: 6190 if (nsid == NVME_NSID_BROADCAST) { 6191 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 6192 ns = nvme_ns(n, i); 6193 6194 if (!ns) { 6195 continue; 6196 } 6197 6198 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { 6199 ns->features.err_rec = dw11; 6200 } 6201 } 6202 6203 break; 6204 } 6205 6206 assert(ns); 6207 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { 6208 ns->features.err_rec = dw11; 6209 } 6210 break; 6211 case NVME_VOLATILE_WRITE_CACHE: 6212 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 6213 ns = nvme_ns(n, i); 6214 if (!ns) { 6215 continue; 6216 } 6217 6218 if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) { 6219 blk_flush(ns->blkconf.blk); 6220 } 6221 6222 blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1); 6223 } 6224 6225 break; 6226 6227 case NVME_NUMBER_OF_QUEUES: 6228 if (n->qs_created) { 6229 return NVME_CMD_SEQ_ERROR | NVME_DNR; 6230 } 6231 6232 /* 6233 * NVMe v1.3, Section 5.21.1.7: FFFFh is not an allowed value for NCQR 6234 * and NSQR. 
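     * The number of queues actually allocated is reported back in Completion
     * Queue Entry Dword 0 as 0's based NSQA/NCQA values (see the cqe.result
     * assignment below).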
6235 */ 6236 if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) { 6237 return NVME_INVALID_FIELD | NVME_DNR; 6238 } 6239 6240 trace_pci_nvme_setfeat_numq((dw11 & 0xffff) + 1, 6241 ((dw11 >> 16) & 0xffff) + 1, 6242 n->conf_ioqpairs, 6243 n->conf_ioqpairs); 6244 req->cqe.result = cpu_to_le32((n->conf_ioqpairs - 1) | 6245 ((n->conf_ioqpairs - 1) << 16)); 6246 break; 6247 case NVME_ASYNCHRONOUS_EVENT_CONF: 6248 n->features.async_config = dw11; 6249 break; 6250 case NVME_TIMESTAMP: 6251 return nvme_set_feature_timestamp(n, req); 6252 case NVME_HOST_BEHAVIOR_SUPPORT: 6253 status = nvme_h2c(n, (uint8_t *)&n->features.hbs, 6254 sizeof(n->features.hbs), req); 6255 if (status) { 6256 return status; 6257 } 6258 6259 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 6260 ns = nvme_ns(n, i); 6261 6262 if (!ns) { 6263 continue; 6264 } 6265 6266 ns->id_ns.nlbaf = ns->nlbaf - 1; 6267 if (!n->features.hbs.lbafee) { 6268 ns->id_ns.nlbaf = MIN(ns->id_ns.nlbaf, 15); 6269 } 6270 } 6271 6272 return status; 6273 case NVME_COMMAND_SET_PROFILE: 6274 if (dw11 & 0x1ff) { 6275 trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff); 6276 return NVME_CMD_SET_CMB_REJECTED | NVME_DNR; 6277 } 6278 break; 6279 case NVME_FDP_MODE: 6280 /* spec: abort with cmd seq err if there's one or more NS' in endgrp */ 6281 return NVME_CMD_SEQ_ERROR | NVME_DNR; 6282 case NVME_FDP_EVENTS: 6283 return nvme_set_feature_fdp_events(n, ns, req); 6284 default: 6285 return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; 6286 } 6287 return NVME_SUCCESS; 6288 } 6289 6290 static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req) 6291 { 6292 trace_pci_nvme_aer(nvme_cid(req)); 6293 6294 if (n->outstanding_aers > n->params.aerl) { 6295 trace_pci_nvme_aer_aerl_exceeded(); 6296 return NVME_AER_LIMIT_EXCEEDED; 6297 } 6298 6299 n->aer_reqs[n->outstanding_aers] = req; 6300 n->outstanding_aers++; 6301 6302 if (!QTAILQ_EMPTY(&n->aer_queue)) { 6303 nvme_process_aers(n); 6304 } 6305 6306 return NVME_NO_COMPLETE; 6307 } 6308 6309 static void nvme_update_dmrsl(NvmeCtrl *n) 6310 { 6311 int nsid; 6312 6313 for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) { 6314 NvmeNamespace *ns = nvme_ns(n, nsid); 6315 if (!ns) { 6316 continue; 6317 } 6318 6319 n->dmrsl = MIN_NON_ZERO(n->dmrsl, 6320 BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); 6321 } 6322 } 6323 6324 static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns) 6325 { 6326 uint32_t cc = ldl_le_p(&n->bar.cc); 6327 6328 ns->iocs = nvme_cse_iocs_none; 6329 switch (ns->csi) { 6330 case NVME_CSI_NVM: 6331 if (NVME_CC_CSS(cc) != NVME_CC_CSS_ADMIN_ONLY) { 6332 ns->iocs = nvme_cse_iocs_nvm; 6333 } 6334 break; 6335 case NVME_CSI_ZONED: 6336 if (NVME_CC_CSS(cc) == NVME_CC_CSS_CSI) { 6337 ns->iocs = nvme_cse_iocs_zoned; 6338 } else if (NVME_CC_CSS(cc) == NVME_CC_CSS_NVM) { 6339 ns->iocs = nvme_cse_iocs_nvm; 6340 } 6341 break; 6342 } 6343 } 6344 6345 static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) 6346 { 6347 NvmeNamespace *ns; 6348 NvmeCtrl *ctrl; 6349 uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {}; 6350 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 6351 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); 6352 uint8_t sel = dw10 & 0xf; 6353 uint16_t *nr_ids = &list[0]; 6354 uint16_t *ids = &list[1]; 6355 uint16_t ret; 6356 int i; 6357 6358 trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf); 6359 6360 if (!nvme_nsid_valid(n, nsid)) { 6361 return NVME_INVALID_NSID | NVME_DNR; 6362 } 6363 6364 ns = nvme_subsys_ns(n->subsys, nsid); 6365 if (!ns) { 6366 return NVME_INVALID_FIELD | NVME_DNR; 6367 } 6368 6369 ret = 
nvme_h2c(n, (uint8_t *)list, 4096, req); 6370 if (ret) { 6371 return ret; 6372 } 6373 6374 if (!*nr_ids) { 6375 return NVME_NS_CTRL_LIST_INVALID | NVME_DNR; 6376 } 6377 6378 *nr_ids = MIN(*nr_ids, NVME_CONTROLLER_LIST_SIZE - 1); 6379 for (i = 0; i < *nr_ids; i++) { 6380 ctrl = nvme_subsys_ctrl(n->subsys, ids[i]); 6381 if (!ctrl) { 6382 return NVME_NS_CTRL_LIST_INVALID | NVME_DNR; 6383 } 6384 6385 switch (sel) { 6386 case NVME_NS_ATTACHMENT_ATTACH: 6387 if (nvme_ns(ctrl, nsid)) { 6388 return NVME_NS_ALREADY_ATTACHED | NVME_DNR; 6389 } 6390 6391 if (ns->attached && !ns->params.shared) { 6392 return NVME_NS_PRIVATE | NVME_DNR; 6393 } 6394 6395 nvme_attach_ns(ctrl, ns); 6396 nvme_select_iocs_ns(ctrl, ns); 6397 6398 break; 6399 6400 case NVME_NS_ATTACHMENT_DETACH: 6401 if (!nvme_ns(ctrl, nsid)) { 6402 return NVME_NS_NOT_ATTACHED | NVME_DNR; 6403 } 6404 6405 ctrl->namespaces[nsid] = NULL; 6406 ns->attached--; 6407 6408 nvme_update_dmrsl(ctrl); 6409 6410 break; 6411 6412 default: 6413 return NVME_INVALID_FIELD | NVME_DNR; 6414 } 6415 6416 /* 6417 * Add namespace id to the changed namespace id list for event clearing 6418 * via Get Log Page command. 6419 */ 6420 if (!test_and_set_bit(nsid, ctrl->changed_nsids)) { 6421 nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE, 6422 NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED, 6423 NVME_LOG_CHANGED_NSLIST); 6424 } 6425 } 6426 6427 return NVME_SUCCESS; 6428 } 6429 6430 typedef struct NvmeFormatAIOCB { 6431 BlockAIOCB common; 6432 BlockAIOCB *aiocb; 6433 NvmeRequest *req; 6434 int ret; 6435 6436 NvmeNamespace *ns; 6437 uint32_t nsid; 6438 bool broadcast; 6439 int64_t offset; 6440 6441 uint8_t lbaf; 6442 uint8_t mset; 6443 uint8_t pi; 6444 uint8_t pil; 6445 } NvmeFormatAIOCB; 6446 6447 static void nvme_format_cancel(BlockAIOCB *aiocb) 6448 { 6449 NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common); 6450 6451 iocb->ret = -ECANCELED; 6452 6453 if (iocb->aiocb) { 6454 blk_aio_cancel_async(iocb->aiocb); 6455 iocb->aiocb = NULL; 6456 } 6457 } 6458 6459 static const AIOCBInfo nvme_format_aiocb_info = { 6460 .aiocb_size = sizeof(NvmeFormatAIOCB), 6461 .cancel_async = nvme_format_cancel, 6462 .get_aio_context = nvme_get_aio_context, 6463 }; 6464 6465 static void nvme_format_set(NvmeNamespace *ns, uint8_t lbaf, uint8_t mset, 6466 uint8_t pi, uint8_t pil) 6467 { 6468 uint8_t lbafl = lbaf & 0xf; 6469 uint8_t lbafu = lbaf >> 4; 6470 6471 trace_pci_nvme_format_set(ns->params.nsid, lbaf, mset, pi, pil); 6472 6473 ns->id_ns.dps = (pil << 3) | pi; 6474 ns->id_ns.flbas = (lbafu << 5) | (mset << 4) | lbafl; 6475 6476 nvme_ns_init_format(ns); 6477 } 6478 6479 static void nvme_do_format(NvmeFormatAIOCB *iocb); 6480 6481 static void nvme_format_ns_cb(void *opaque, int ret) 6482 { 6483 NvmeFormatAIOCB *iocb = opaque; 6484 NvmeNamespace *ns = iocb->ns; 6485 int bytes; 6486 6487 if (iocb->ret < 0) { 6488 goto done; 6489 } else if (ret < 0) { 6490 iocb->ret = ret; 6491 goto done; 6492 } 6493 6494 assert(ns); 6495 6496 if (iocb->offset < ns->size) { 6497 bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset); 6498 6499 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset, 6500 bytes, BDRV_REQ_MAY_UNMAP, 6501 nvme_format_ns_cb, iocb); 6502 6503 iocb->offset += bytes; 6504 return; 6505 } 6506 6507 nvme_format_set(ns, iocb->lbaf, iocb->mset, iocb->pi, iocb->pil); 6508 ns->status = 0x0; 6509 iocb->ns = NULL; 6510 iocb->offset = 0; 6511 6512 done: 6513 nvme_do_format(iocb); 6514 } 6515 6516 static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t 
pi) 6517 { 6518 if (ns->params.zoned) { 6519 return NVME_INVALID_FORMAT | NVME_DNR; 6520 } 6521 6522 if (lbaf > ns->id_ns.nlbaf) { 6523 return NVME_INVALID_FORMAT | NVME_DNR; 6524 } 6525 6526 if (pi && (ns->id_ns.lbaf[lbaf].ms < nvme_pi_tuple_size(ns))) { 6527 return NVME_INVALID_FORMAT | NVME_DNR; 6528 } 6529 6530 if (pi && pi > NVME_ID_NS_DPS_TYPE_3) { 6531 return NVME_INVALID_FIELD | NVME_DNR; 6532 } 6533 6534 return NVME_SUCCESS; 6535 } 6536 6537 static void nvme_do_format(NvmeFormatAIOCB *iocb) 6538 { 6539 NvmeRequest *req = iocb->req; 6540 NvmeCtrl *n = nvme_ctrl(req); 6541 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); 6542 uint8_t lbaf = dw10 & 0xf; 6543 uint8_t pi = (dw10 >> 5) & 0x7; 6544 uint16_t status; 6545 int i; 6546 6547 if (iocb->ret < 0) { 6548 goto done; 6549 } 6550 6551 if (iocb->broadcast) { 6552 for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { 6553 iocb->ns = nvme_ns(n, i); 6554 if (iocb->ns) { 6555 iocb->nsid = i; 6556 break; 6557 } 6558 } 6559 } 6560 6561 if (!iocb->ns) { 6562 goto done; 6563 } 6564 6565 status = nvme_format_check(iocb->ns, lbaf, pi); 6566 if (status) { 6567 req->status = status; 6568 goto done; 6569 } 6570 6571 iocb->ns->status = NVME_FORMAT_IN_PROGRESS; 6572 nvme_format_ns_cb(iocb, 0); 6573 return; 6574 6575 done: 6576 iocb->common.cb(iocb->common.opaque, iocb->ret); 6577 qemu_aio_unref(iocb); 6578 } 6579 6580 static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req) 6581 { 6582 NvmeFormatAIOCB *iocb; 6583 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 6584 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); 6585 uint8_t lbaf = dw10 & 0xf; 6586 uint8_t mset = (dw10 >> 4) & 0x1; 6587 uint8_t pi = (dw10 >> 5) & 0x7; 6588 uint8_t pil = (dw10 >> 8) & 0x1; 6589 uint8_t lbafu = (dw10 >> 12) & 0x3; 6590 uint16_t status; 6591 6592 iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req); 6593 6594 iocb->req = req; 6595 iocb->ret = 0; 6596 iocb->ns = NULL; 6597 iocb->nsid = 0; 6598 iocb->lbaf = lbaf; 6599 iocb->mset = mset; 6600 iocb->pi = pi; 6601 iocb->pil = pil; 6602 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); 6603 iocb->offset = 0; 6604 6605 if (n->features.hbs.lbafee) { 6606 iocb->lbaf |= lbafu << 4; 6607 } 6608 6609 if (!iocb->broadcast) { 6610 if (!nvme_nsid_valid(n, nsid)) { 6611 status = NVME_INVALID_NSID | NVME_DNR; 6612 goto out; 6613 } 6614 6615 iocb->ns = nvme_ns(n, nsid); 6616 if (!iocb->ns) { 6617 status = NVME_INVALID_FIELD | NVME_DNR; 6618 goto out; 6619 } 6620 } 6621 6622 req->aiocb = &iocb->common; 6623 nvme_do_format(iocb); 6624 6625 return NVME_NO_COMPLETE; 6626 6627 out: 6628 qemu_aio_unref(iocb); 6629 6630 return status; 6631 } 6632 6633 static void nvme_get_virt_res_num(NvmeCtrl *n, uint8_t rt, int *num_total, 6634 int *num_prim, int *num_sec) 6635 { 6636 *num_total = le32_to_cpu(rt ? 6637 n->pri_ctrl_cap.vifrt : n->pri_ctrl_cap.vqfrt); 6638 *num_prim = le16_to_cpu(rt ? 6639 n->pri_ctrl_cap.virfap : n->pri_ctrl_cap.vqrfap); 6640 *num_sec = le16_to_cpu(rt ? 
n->pri_ctrl_cap.virfa : n->pri_ctrl_cap.vqrfa); 6641 } 6642 6643 static uint16_t nvme_assign_virt_res_to_prim(NvmeCtrl *n, NvmeRequest *req, 6644 uint16_t cntlid, uint8_t rt, 6645 int nr) 6646 { 6647 int num_total, num_prim, num_sec; 6648 6649 if (cntlid != n->cntlid) { 6650 return NVME_INVALID_CTRL_ID | NVME_DNR; 6651 } 6652 6653 nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec); 6654 6655 if (nr > num_total) { 6656 return NVME_INVALID_NUM_RESOURCES | NVME_DNR; 6657 } 6658 6659 if (nr > num_total - num_sec) { 6660 return NVME_INVALID_RESOURCE_ID | NVME_DNR; 6661 } 6662 6663 if (rt) { 6664 n->next_pri_ctrl_cap.virfap = cpu_to_le16(nr); 6665 } else { 6666 n->next_pri_ctrl_cap.vqrfap = cpu_to_le16(nr); 6667 } 6668 6669 req->cqe.result = cpu_to_le32(nr); 6670 return req->status; 6671 } 6672 6673 static void nvme_update_virt_res(NvmeCtrl *n, NvmeSecCtrlEntry *sctrl, 6674 uint8_t rt, int nr) 6675 { 6676 int prev_nr, prev_total; 6677 6678 if (rt) { 6679 prev_nr = le16_to_cpu(sctrl->nvi); 6680 prev_total = le32_to_cpu(n->pri_ctrl_cap.virfa); 6681 sctrl->nvi = cpu_to_le16(nr); 6682 n->pri_ctrl_cap.virfa = cpu_to_le32(prev_total + nr - prev_nr); 6683 } else { 6684 prev_nr = le16_to_cpu(sctrl->nvq); 6685 prev_total = le32_to_cpu(n->pri_ctrl_cap.vqrfa); 6686 sctrl->nvq = cpu_to_le16(nr); 6687 n->pri_ctrl_cap.vqrfa = cpu_to_le32(prev_total + nr - prev_nr); 6688 } 6689 } 6690 6691 static uint16_t nvme_assign_virt_res_to_sec(NvmeCtrl *n, NvmeRequest *req, 6692 uint16_t cntlid, uint8_t rt, int nr) 6693 { 6694 int num_total, num_prim, num_sec, num_free, diff, limit; 6695 NvmeSecCtrlEntry *sctrl; 6696 6697 sctrl = nvme_sctrl_for_cntlid(n, cntlid); 6698 if (!sctrl) { 6699 return NVME_INVALID_CTRL_ID | NVME_DNR; 6700 } 6701 6702 if (sctrl->scs) { 6703 return NVME_INVALID_SEC_CTRL_STATE | NVME_DNR; 6704 } 6705 6706 limit = le16_to_cpu(rt ? n->pri_ctrl_cap.vifrsm : n->pri_ctrl_cap.vqfrsm); 6707 if (nr > limit) { 6708 return NVME_INVALID_NUM_RESOURCES | NVME_DNR; 6709 } 6710 6711 nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec); 6712 num_free = num_total - num_prim - num_sec; 6713 diff = nr - le16_to_cpu(rt ? 
sctrl->nvi : sctrl->nvq); 6714 6715 if (diff > num_free) { 6716 return NVME_INVALID_RESOURCE_ID | NVME_DNR; 6717 } 6718 6719 nvme_update_virt_res(n, sctrl, rt, nr); 6720 req->cqe.result = cpu_to_le32(nr); 6721 6722 return req->status; 6723 } 6724 6725 static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online) 6726 { 6727 PCIDevice *pci = PCI_DEVICE(n); 6728 NvmeCtrl *sn = NULL; 6729 NvmeSecCtrlEntry *sctrl; 6730 int vf_index; 6731 6732 sctrl = nvme_sctrl_for_cntlid(n, cntlid); 6733 if (!sctrl) { 6734 return NVME_INVALID_CTRL_ID | NVME_DNR; 6735 } 6736 6737 if (!pci_is_vf(pci)) { 6738 vf_index = le16_to_cpu(sctrl->vfn) - 1; 6739 sn = NVME(pcie_sriov_get_vf_at_index(pci, vf_index)); 6740 } 6741 6742 if (online) { 6743 if (!sctrl->nvi || (le16_to_cpu(sctrl->nvq) < 2) || !sn) { 6744 return NVME_INVALID_SEC_CTRL_STATE | NVME_DNR; 6745 } 6746 6747 if (!sctrl->scs) { 6748 sctrl->scs = 0x1; 6749 nvme_ctrl_reset(sn, NVME_RESET_FUNCTION); 6750 } 6751 } else { 6752 nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_INTERRUPT, 0); 6753 nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_QUEUE, 0); 6754 6755 if (sctrl->scs) { 6756 sctrl->scs = 0x0; 6757 if (sn) { 6758 nvme_ctrl_reset(sn, NVME_RESET_FUNCTION); 6759 } 6760 } 6761 } 6762 6763 return NVME_SUCCESS; 6764 } 6765 6766 static uint16_t nvme_virt_mngmt(NvmeCtrl *n, NvmeRequest *req) 6767 { 6768 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); 6769 uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); 6770 uint8_t act = dw10 & 0xf; 6771 uint8_t rt = (dw10 >> 8) & 0x7; 6772 uint16_t cntlid = (dw10 >> 16) & 0xffff; 6773 int nr = dw11 & 0xffff; 6774 6775 trace_pci_nvme_virt_mngmt(nvme_cid(req), act, cntlid, rt ? "VI" : "VQ", nr); 6776 6777 if (rt != NVME_VIRT_RES_QUEUE && rt != NVME_VIRT_RES_INTERRUPT) { 6778 return NVME_INVALID_RESOURCE_ID | NVME_DNR; 6779 } 6780 6781 switch (act) { 6782 case NVME_VIRT_MNGMT_ACTION_SEC_ASSIGN: 6783 return nvme_assign_virt_res_to_sec(n, req, cntlid, rt, nr); 6784 case NVME_VIRT_MNGMT_ACTION_PRM_ALLOC: 6785 return nvme_assign_virt_res_to_prim(n, req, cntlid, rt, nr); 6786 case NVME_VIRT_MNGMT_ACTION_SEC_ONLINE: 6787 return nvme_virt_set_state(n, cntlid, true); 6788 case NVME_VIRT_MNGMT_ACTION_SEC_OFFLINE: 6789 return nvme_virt_set_state(n, cntlid, false); 6790 default: 6791 return NVME_INVALID_FIELD | NVME_DNR; 6792 } 6793 } 6794 6795 static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req) 6796 { 6797 PCIDevice *pci = PCI_DEVICE(n); 6798 uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1); 6799 uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2); 6800 int i; 6801 6802 /* Address should be page aligned */ 6803 if (dbs_addr & (n->page_size - 1) || eis_addr & (n->page_size - 1)) { 6804 return NVME_INVALID_FIELD | NVME_DNR; 6805 } 6806 6807 /* Save shadow buffer base addr for use during queue creation */ 6808 n->dbbuf_dbs = dbs_addr; 6809 n->dbbuf_eis = eis_addr; 6810 n->dbbuf_enabled = true; 6811 6812 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { 6813 NvmeSQueue *sq = n->sq[i]; 6814 NvmeCQueue *cq = n->cq[i]; 6815 6816 if (sq) { 6817 /* 6818 * CAP.DSTRD is 0, so offset of ith sq db_addr is (i<<3) 6819 * nvme_process_db() uses this hard-coded way to calculate 6820 * doorbell offsets. Be consistent with that here. 
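 * Each queue pair thus consumes two 4-byte doorbells: SQ i's shadow doorbell
 * and EventIdx entries sit at dbs_addr/eis_addr + (i << 3), and the matching
 * CQ entries follow at an additional 4-byte offset.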
6821 */ 6822 sq->db_addr = dbs_addr + (i << 3); 6823 sq->ei_addr = eis_addr + (i << 3); 6824 stl_le_pci_dma(pci, sq->db_addr, sq->tail, MEMTXATTRS_UNSPECIFIED); 6825 6826 if (n->params.ioeventfd && sq->sqid != 0) { 6827 if (!nvme_init_sq_ioeventfd(sq)) { 6828 sq->ioeventfd_enabled = true; 6829 } 6830 } 6831 } 6832 6833 if (cq) { 6834 /* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */ 6835 cq->db_addr = dbs_addr + (i << 3) + (1 << 2); 6836 cq->ei_addr = eis_addr + (i << 3) + (1 << 2); 6837 stl_le_pci_dma(pci, cq->db_addr, cq->head, MEMTXATTRS_UNSPECIFIED); 6838 6839 if (n->params.ioeventfd && cq->cqid != 0) { 6840 if (!nvme_init_cq_ioeventfd(cq)) { 6841 cq->ioeventfd_enabled = true; 6842 } 6843 } 6844 } 6845 } 6846 6847 trace_pci_nvme_dbbuf_config(dbs_addr, eis_addr); 6848 6849 return NVME_SUCCESS; 6850 } 6851 6852 static uint16_t nvme_directive_send(NvmeCtrl *n, NvmeRequest *req) 6853 { 6854 return NVME_INVALID_FIELD | NVME_DNR; 6855 } 6856 6857 static uint16_t nvme_directive_receive(NvmeCtrl *n, NvmeRequest *req) 6858 { 6859 NvmeNamespace *ns; 6860 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); 6861 uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); 6862 uint32_t nsid = le32_to_cpu(req->cmd.nsid); 6863 uint8_t doper, dtype; 6864 uint32_t numd, trans_len; 6865 NvmeDirectiveIdentify id = { 6866 .supported = 1 << NVME_DIRECTIVE_IDENTIFY, 6867 .enabled = 1 << NVME_DIRECTIVE_IDENTIFY, 6868 }; 6869 6870 numd = dw10 + 1; 6871 doper = dw11 & 0xff; 6872 dtype = (dw11 >> 8) & 0xff; 6873 6874 trans_len = MIN(sizeof(NvmeDirectiveIdentify), numd << 2); 6875 6876 if (nsid == NVME_NSID_BROADCAST || dtype != NVME_DIRECTIVE_IDENTIFY || 6877 doper != NVME_DIRECTIVE_RETURN_PARAMS) { 6878 return NVME_INVALID_FIELD | NVME_DNR; 6879 } 6880 6881 ns = nvme_ns(n, nsid); 6882 if (!ns) { 6883 return NVME_INVALID_FIELD | NVME_DNR; 6884 } 6885 6886 switch (dtype) { 6887 case NVME_DIRECTIVE_IDENTIFY: 6888 switch (doper) { 6889 case NVME_DIRECTIVE_RETURN_PARAMS: 6890 if (ns->endgrp->fdp.enabled) { 6891 id.supported |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT; 6892 id.enabled |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT; 6893 id.persistent |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT; 6894 } 6895 6896 return nvme_c2h(n, (uint8_t *)&id, trans_len, req); 6897 6898 default: 6899 return NVME_INVALID_FIELD | NVME_DNR; 6900 } 6901 6902 default: 6903 return NVME_INVALID_FIELD; 6904 } 6905 } 6906 6907 static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) 6908 { 6909 trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, 6910 nvme_adm_opc_str(req->cmd.opcode)); 6911 6912 if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { 6913 trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode); 6914 return NVME_INVALID_OPCODE | NVME_DNR; 6915 } 6916 6917 /* SGLs shall not be used for Admin commands in NVMe over PCIe */ 6918 if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) { 6919 return NVME_INVALID_FIELD | NVME_DNR; 6920 } 6921 6922 if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { 6923 return NVME_INVALID_FIELD; 6924 } 6925 6926 switch (req->cmd.opcode) { 6927 case NVME_ADM_CMD_DELETE_SQ: 6928 return nvme_del_sq(n, req); 6929 case NVME_ADM_CMD_CREATE_SQ: 6930 return nvme_create_sq(n, req); 6931 case NVME_ADM_CMD_GET_LOG_PAGE: 6932 return nvme_get_log(n, req); 6933 case NVME_ADM_CMD_DELETE_CQ: 6934 return nvme_del_cq(n, req); 6935 case NVME_ADM_CMD_CREATE_CQ: 6936 return nvme_create_cq(n, req); 6937 case NVME_ADM_CMD_IDENTIFY: 6938 return nvme_identify(n, req); 6939 case NVME_ADM_CMD_ABORT: 6940 return nvme_abort(n, 
req); 6941 case NVME_ADM_CMD_SET_FEATURES: 6942 return nvme_set_feature(n, req); 6943 case NVME_ADM_CMD_GET_FEATURES: 6944 return nvme_get_feature(n, req); 6945 case NVME_ADM_CMD_ASYNC_EV_REQ: 6946 return nvme_aer(n, req); 6947 case NVME_ADM_CMD_NS_ATTACHMENT: 6948 return nvme_ns_attachment(n, req); 6949 case NVME_ADM_CMD_VIRT_MNGMT: 6950 return nvme_virt_mngmt(n, req); 6951 case NVME_ADM_CMD_DBBUF_CONFIG: 6952 return nvme_dbbuf_config(n, req); 6953 case NVME_ADM_CMD_FORMAT_NVM: 6954 return nvme_format(n, req); 6955 case NVME_ADM_CMD_DIRECTIVE_SEND: 6956 return nvme_directive_send(n, req); 6957 case NVME_ADM_CMD_DIRECTIVE_RECV: 6958 return nvme_directive_receive(n, req); 6959 default: 6960 assert(false); 6961 } 6962 6963 return NVME_INVALID_OPCODE | NVME_DNR; 6964 } 6965 6966 static void nvme_update_sq_eventidx(const NvmeSQueue *sq) 6967 { 6968 trace_pci_nvme_update_sq_eventidx(sq->sqid, sq->tail); 6969 6970 stl_le_pci_dma(PCI_DEVICE(sq->ctrl), sq->ei_addr, sq->tail, 6971 MEMTXATTRS_UNSPECIFIED); 6972 } 6973 6974 static void nvme_update_sq_tail(NvmeSQueue *sq) 6975 { 6976 ldl_le_pci_dma(PCI_DEVICE(sq->ctrl), sq->db_addr, &sq->tail, 6977 MEMTXATTRS_UNSPECIFIED); 6978 6979 trace_pci_nvme_update_sq_tail(sq->sqid, sq->tail); 6980 } 6981 6982 static void nvme_process_sq(void *opaque) 6983 { 6984 NvmeSQueue *sq = opaque; 6985 NvmeCtrl *n = sq->ctrl; 6986 NvmeCQueue *cq = n->cq[sq->cqid]; 6987 6988 uint16_t status; 6989 hwaddr addr; 6990 NvmeCmd cmd; 6991 NvmeRequest *req; 6992 6993 if (n->dbbuf_enabled) { 6994 nvme_update_sq_tail(sq); 6995 } 6996 6997 while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) { 6998 addr = sq->dma_addr + sq->head * n->sqe_size; 6999 if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) { 7000 trace_pci_nvme_err_addr_read(addr); 7001 trace_pci_nvme_err_cfs(); 7002 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); 7003 break; 7004 } 7005 nvme_inc_sq_head(sq); 7006 7007 req = QTAILQ_FIRST(&sq->req_list); 7008 QTAILQ_REMOVE(&sq->req_list, req, entry); 7009 QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry); 7010 nvme_req_clear(req); 7011 req->cqe.cid = cmd.cid; 7012 memcpy(&req->cmd, &cmd, sizeof(NvmeCmd)); 7013 7014 status = sq->sqid ? nvme_io_cmd(n, req) : 7015 nvme_admin_cmd(n, req); 7016 if (status != NVME_NO_COMPLETE) { 7017 req->status = status; 7018 nvme_enqueue_req_completion(cq, req); 7019 } 7020 7021 if (n->dbbuf_enabled) { 7022 nvme_update_sq_eventidx(sq); 7023 nvme_update_sq_tail(sq); 7024 } 7025 } 7026 } 7027 7028 static void nvme_update_msixcap_ts(PCIDevice *pci_dev, uint32_t table_size) 7029 { 7030 uint8_t *config; 7031 7032 if (!msix_present(pci_dev)) { 7033 return; 7034 } 7035 7036 assert(table_size > 0 && table_size <= pci_dev->msix_entries_nr); 7037 7038 config = pci_dev->config + pci_dev->msix_cap; 7039 pci_set_word_by_mask(config + PCI_MSIX_FLAGS, PCI_MSIX_FLAGS_QSIZE, 7040 table_size - 1); 7041 } 7042 7043 static void nvme_activate_virt_res(NvmeCtrl *n) 7044 { 7045 PCIDevice *pci_dev = PCI_DEVICE(n); 7046 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; 7047 NvmeSecCtrlEntry *sctrl; 7048 7049 /* -1 to account for the admin queue */ 7050 if (pci_is_vf(pci_dev)) { 7051 sctrl = nvme_sctrl(n); 7052 cap->vqprt = sctrl->nvq; 7053 cap->viprt = sctrl->nvi; 7054 n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; 7055 n->conf_msix_qsize = sctrl->nvi ? 
le16_to_cpu(sctrl->nvi) : 1; 7056 } else { 7057 cap->vqrfap = n->next_pri_ctrl_cap.vqrfap; 7058 cap->virfap = n->next_pri_ctrl_cap.virfap; 7059 n->conf_ioqpairs = le16_to_cpu(cap->vqprt) + 7060 le16_to_cpu(cap->vqrfap) - 1; 7061 n->conf_msix_qsize = le16_to_cpu(cap->viprt) + 7062 le16_to_cpu(cap->virfap); 7063 } 7064 } 7065 7066 static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst) 7067 { 7068 PCIDevice *pci_dev = PCI_DEVICE(n); 7069 NvmeSecCtrlEntry *sctrl; 7070 NvmeNamespace *ns; 7071 int i; 7072 7073 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 7074 ns = nvme_ns(n, i); 7075 if (!ns) { 7076 continue; 7077 } 7078 7079 nvme_ns_drain(ns); 7080 } 7081 7082 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { 7083 if (n->sq[i] != NULL) { 7084 nvme_free_sq(n->sq[i], n); 7085 } 7086 } 7087 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { 7088 if (n->cq[i] != NULL) { 7089 nvme_free_cq(n->cq[i], n); 7090 } 7091 } 7092 7093 while (!QTAILQ_EMPTY(&n->aer_queue)) { 7094 NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue); 7095 QTAILQ_REMOVE(&n->aer_queue, event, entry); 7096 g_free(event); 7097 } 7098 7099 if (n->params.sriov_max_vfs) { 7100 if (!pci_is_vf(pci_dev)) { 7101 for (i = 0; i < n->sec_ctrl_list.numcntl; i++) { 7102 sctrl = &n->sec_ctrl_list.sec[i]; 7103 nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); 7104 } 7105 7106 if (rst != NVME_RESET_CONTROLLER) { 7107 pcie_sriov_pf_disable_vfs(pci_dev); 7108 } 7109 } 7110 7111 if (rst != NVME_RESET_CONTROLLER) { 7112 nvme_activate_virt_res(n); 7113 } 7114 } 7115 7116 n->aer_queued = 0; 7117 n->aer_mask = 0; 7118 n->outstanding_aers = 0; 7119 n->qs_created = false; 7120 7121 nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); 7122 7123 if (pci_is_vf(pci_dev)) { 7124 sctrl = nvme_sctrl(n); 7125 7126 stl_le_p(&n->bar.csts, sctrl->scs ? 
0 : NVME_CSTS_FAILED); 7127 } else { 7128 stl_le_p(&n->bar.csts, 0); 7129 } 7130 7131 stl_le_p(&n->bar.intms, 0); 7132 stl_le_p(&n->bar.intmc, 0); 7133 stl_le_p(&n->bar.cc, 0); 7134 7135 n->dbbuf_dbs = 0; 7136 n->dbbuf_eis = 0; 7137 n->dbbuf_enabled = false; 7138 } 7139 7140 static void nvme_ctrl_shutdown(NvmeCtrl *n) 7141 { 7142 NvmeNamespace *ns; 7143 int i; 7144 7145 if (n->pmr.dev) { 7146 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); 7147 } 7148 7149 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 7150 ns = nvme_ns(n, i); 7151 if (!ns) { 7152 continue; 7153 } 7154 7155 nvme_ns_shutdown(ns); 7156 } 7157 } 7158 7159 static void nvme_select_iocs(NvmeCtrl *n) 7160 { 7161 NvmeNamespace *ns; 7162 int i; 7163 7164 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 7165 ns = nvme_ns(n, i); 7166 if (!ns) { 7167 continue; 7168 } 7169 7170 nvme_select_iocs_ns(n, ns); 7171 } 7172 } 7173 7174 static int nvme_start_ctrl(NvmeCtrl *n) 7175 { 7176 uint64_t cap = ldq_le_p(&n->bar.cap); 7177 uint32_t cc = ldl_le_p(&n->bar.cc); 7178 uint32_t aqa = ldl_le_p(&n->bar.aqa); 7179 uint64_t asq = ldq_le_p(&n->bar.asq); 7180 uint64_t acq = ldq_le_p(&n->bar.acq); 7181 uint32_t page_bits = NVME_CC_MPS(cc) + 12; 7182 uint32_t page_size = 1 << page_bits; 7183 NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); 7184 7185 if (pci_is_vf(PCI_DEVICE(n)) && !sctrl->scs) { 7186 trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi), 7187 le16_to_cpu(sctrl->nvq)); 7188 return -1; 7189 } 7190 if (unlikely(n->cq[0])) { 7191 trace_pci_nvme_err_startfail_cq(); 7192 return -1; 7193 } 7194 if (unlikely(n->sq[0])) { 7195 trace_pci_nvme_err_startfail_sq(); 7196 return -1; 7197 } 7198 if (unlikely(asq & (page_size - 1))) { 7199 trace_pci_nvme_err_startfail_asq_misaligned(asq); 7200 return -1; 7201 } 7202 if (unlikely(acq & (page_size - 1))) { 7203 trace_pci_nvme_err_startfail_acq_misaligned(acq); 7204 return -1; 7205 } 7206 if (unlikely(!(NVME_CAP_CSS(cap) & (1 << NVME_CC_CSS(cc))))) { 7207 trace_pci_nvme_err_startfail_css(NVME_CC_CSS(cc)); 7208 return -1; 7209 } 7210 if (unlikely(NVME_CC_MPS(cc) < NVME_CAP_MPSMIN(cap))) { 7211 trace_pci_nvme_err_startfail_page_too_small( 7212 NVME_CC_MPS(cc), 7213 NVME_CAP_MPSMIN(cap)); 7214 return -1; 7215 } 7216 if (unlikely(NVME_CC_MPS(cc) > 7217 NVME_CAP_MPSMAX(cap))) { 7218 trace_pci_nvme_err_startfail_page_too_large( 7219 NVME_CC_MPS(cc), 7220 NVME_CAP_MPSMAX(cap)); 7221 return -1; 7222 } 7223 if (unlikely(NVME_CC_IOCQES(cc) < 7224 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) { 7225 trace_pci_nvme_err_startfail_cqent_too_small( 7226 NVME_CC_IOCQES(cc), 7227 NVME_CTRL_CQES_MIN(cap)); 7228 return -1; 7229 } 7230 if (unlikely(NVME_CC_IOCQES(cc) > 7231 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) { 7232 trace_pci_nvme_err_startfail_cqent_too_large( 7233 NVME_CC_IOCQES(cc), 7234 NVME_CTRL_CQES_MAX(cap)); 7235 return -1; 7236 } 7237 if (unlikely(NVME_CC_IOSQES(cc) < 7238 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) { 7239 trace_pci_nvme_err_startfail_sqent_too_small( 7240 NVME_CC_IOSQES(cc), 7241 NVME_CTRL_SQES_MIN(cap)); 7242 return -1; 7243 } 7244 if (unlikely(NVME_CC_IOSQES(cc) > 7245 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) { 7246 trace_pci_nvme_err_startfail_sqent_too_large( 7247 NVME_CC_IOSQES(cc), 7248 NVME_CTRL_SQES_MAX(cap)); 7249 return -1; 7250 } 7251 if (unlikely(!NVME_AQA_ASQS(aqa))) { 7252 trace_pci_nvme_err_startfail_asqent_sz_zero(); 7253 return -1; 7254 } 7255 if (unlikely(!NVME_AQA_ACQS(aqa))) { 7256 trace_pci_nvme_err_startfail_acqent_sz_zero(); 7257 return -1; 7258 } 7259 7260 n->page_bits = 
page_bits; 7261 n->page_size = page_size; 7262 n->max_prp_ents = n->page_size / sizeof(uint64_t); 7263 n->cqe_size = 1 << NVME_CC_IOCQES(cc); 7264 n->sqe_size = 1 << NVME_CC_IOSQES(cc); 7265 nvme_init_cq(&n->admin_cq, n, acq, 0, 0, NVME_AQA_ACQS(aqa) + 1, 1); 7266 nvme_init_sq(&n->admin_sq, n, asq, 0, 0, NVME_AQA_ASQS(aqa) + 1); 7267 7268 nvme_set_timestamp(n, 0ULL); 7269 7270 nvme_select_iocs(n); 7271 7272 return 0; 7273 } 7274 7275 static void nvme_cmb_enable_regs(NvmeCtrl *n) 7276 { 7277 uint32_t cmbloc = ldl_le_p(&n->bar.cmbloc); 7278 uint32_t cmbsz = ldl_le_p(&n->bar.cmbsz); 7279 7280 NVME_CMBLOC_SET_CDPCILS(cmbloc, 1); 7281 NVME_CMBLOC_SET_CDPMLS(cmbloc, 1); 7282 NVME_CMBLOC_SET_BIR(cmbloc, NVME_CMB_BIR); 7283 stl_le_p(&n->bar.cmbloc, cmbloc); 7284 7285 NVME_CMBSZ_SET_SQS(cmbsz, 1); 7286 NVME_CMBSZ_SET_CQS(cmbsz, 0); 7287 NVME_CMBSZ_SET_LISTS(cmbsz, 1); 7288 NVME_CMBSZ_SET_RDS(cmbsz, 1); 7289 NVME_CMBSZ_SET_WDS(cmbsz, 1); 7290 NVME_CMBSZ_SET_SZU(cmbsz, 2); /* MBs */ 7291 NVME_CMBSZ_SET_SZ(cmbsz, n->params.cmb_size_mb); 7292 stl_le_p(&n->bar.cmbsz, cmbsz); 7293 } 7294 7295 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, 7296 unsigned size) 7297 { 7298 PCIDevice *pci = PCI_DEVICE(n); 7299 uint64_t cap = ldq_le_p(&n->bar.cap); 7300 uint32_t cc = ldl_le_p(&n->bar.cc); 7301 uint32_t intms = ldl_le_p(&n->bar.intms); 7302 uint32_t csts = ldl_le_p(&n->bar.csts); 7303 uint32_t pmrsts = ldl_le_p(&n->bar.pmrsts); 7304 7305 if (unlikely(offset & (sizeof(uint32_t) - 1))) { 7306 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32, 7307 "MMIO write not 32-bit aligned," 7308 " offset=0x%"PRIx64"", offset); 7309 /* should be ignored, fall through for now */ 7310 } 7311 7312 if (unlikely(size < sizeof(uint32_t))) { 7313 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall, 7314 "MMIO write smaller than 32-bits," 7315 " offset=0x%"PRIx64", size=%u", 7316 offset, size); 7317 /* should be ignored, fall through for now */ 7318 } 7319 7320 switch (offset) { 7321 case NVME_REG_INTMS: 7322 if (unlikely(msix_enabled(pci))) { 7323 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, 7324 "undefined access to interrupt mask set" 7325 " when MSI-X is enabled"); 7326 /* should be ignored, fall through for now */ 7327 } 7328 intms |= data; 7329 stl_le_p(&n->bar.intms, intms); 7330 n->bar.intmc = n->bar.intms; 7331 trace_pci_nvme_mmio_intm_set(data & 0xffffffff, intms); 7332 nvme_irq_check(n); 7333 break; 7334 case NVME_REG_INTMC: 7335 if (unlikely(msix_enabled(pci))) { 7336 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, 7337 "undefined access to interrupt mask clr" 7338 " when MSI-X is enabled"); 7339 /* should be ignored, fall through for now */ 7340 } 7341 intms &= ~data; 7342 stl_le_p(&n->bar.intms, intms); 7343 n->bar.intmc = n->bar.intms; 7344 trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, intms); 7345 nvme_irq_check(n); 7346 break; 7347 case NVME_REG_CC: 7348 stl_le_p(&n->bar.cc, data); 7349 7350 trace_pci_nvme_mmio_cfg(data & 0xffffffff); 7351 7352 if (NVME_CC_SHN(data) && !(NVME_CC_SHN(cc))) { 7353 trace_pci_nvme_mmio_shutdown_set(); 7354 nvme_ctrl_shutdown(n); 7355 csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT); 7356 csts |= NVME_CSTS_SHST_COMPLETE; 7357 } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(cc)) { 7358 trace_pci_nvme_mmio_shutdown_cleared(); 7359 csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT); 7360 } 7361 7362 if (NVME_CC_EN(data) && !NVME_CC_EN(cc)) { 7363 if (unlikely(nvme_start_ctrl(n))) { 7364 trace_pci_nvme_err_startfail(); 7365 csts = NVME_CSTS_FAILED; 7366 } else { 7367 
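/* CC.EN 0 -> 1 and nvme_start_ctrl() succeeded; CSTS.RDY is reported via csts below */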
trace_pci_nvme_mmio_start_success(); 7368 csts = NVME_CSTS_READY; 7369 } 7370 } else if (!NVME_CC_EN(data) && NVME_CC_EN(cc)) { 7371 trace_pci_nvme_mmio_stopped(); 7372 nvme_ctrl_reset(n, NVME_RESET_CONTROLLER); 7373 7374 break; 7375 } 7376 7377 stl_le_p(&n->bar.csts, csts); 7378 7379 break; 7380 case NVME_REG_CSTS: 7381 if (data & (1 << 4)) { 7382 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported, 7383 "attempted to W1C CSTS.NSSRO" 7384 " but CAP.NSSRS is zero (not supported)"); 7385 } else if (data != 0) { 7386 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts, 7387 "attempted to set a read only bit" 7388 " of controller status"); 7389 } 7390 break; 7391 case NVME_REG_NSSR: 7392 if (data == 0x4e564d65) { 7393 trace_pci_nvme_ub_mmiowr_ssreset_unsupported(); 7394 } else { 7395 /* The spec says that writes of other values have no effect */ 7396 return; 7397 } 7398 break; 7399 case NVME_REG_AQA: 7400 stl_le_p(&n->bar.aqa, data); 7401 trace_pci_nvme_mmio_aqattr(data & 0xffffffff); 7402 break; 7403 case NVME_REG_ASQ: 7404 stn_le_p(&n->bar.asq, size, data); 7405 trace_pci_nvme_mmio_asqaddr(data); 7406 break; 7407 case NVME_REG_ASQ + 4: 7408 stl_le_p((uint8_t *)&n->bar.asq + 4, data); 7409 trace_pci_nvme_mmio_asqaddr_hi(data, ldq_le_p(&n->bar.asq)); 7410 break; 7411 case NVME_REG_ACQ: 7412 trace_pci_nvme_mmio_acqaddr(data); 7413 stn_le_p(&n->bar.acq, size, data); 7414 break; 7415 case NVME_REG_ACQ + 4: 7416 stl_le_p((uint8_t *)&n->bar.acq + 4, data); 7417 trace_pci_nvme_mmio_acqaddr_hi(data, ldq_le_p(&n->bar.acq)); 7418 break; 7419 case NVME_REG_CMBLOC: 7420 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved, 7421 "invalid write to reserved CMBLOC" 7422 " when CMBSZ is zero, ignored"); 7423 return; 7424 case NVME_REG_CMBSZ: 7425 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly, 7426 "invalid write to read only CMBSZ, ignored"); 7427 return; 7428 case NVME_REG_CMBMSC: 7429 if (!NVME_CAP_CMBS(cap)) { 7430 return; 7431 } 7432 7433 stn_le_p(&n->bar.cmbmsc, size, data); 7434 n->cmb.cmse = false; 7435 7436 if (NVME_CMBMSC_CRE(data)) { 7437 nvme_cmb_enable_regs(n); 7438 7439 if (NVME_CMBMSC_CMSE(data)) { 7440 uint64_t cmbmsc = ldq_le_p(&n->bar.cmbmsc); 7441 hwaddr cba = NVME_CMBMSC_CBA(cmbmsc) << CMBMSC_CBA_SHIFT; 7442 if (cba + int128_get64(n->cmb.mem.size) < cba) { 7443 uint32_t cmbsts = ldl_le_p(&n->bar.cmbsts); 7444 NVME_CMBSTS_SET_CBAI(cmbsts, 1); 7445 stl_le_p(&n->bar.cmbsts, cmbsts); 7446 return; 7447 } 7448 7449 n->cmb.cba = cba; 7450 n->cmb.cmse = true; 7451 } 7452 } else { 7453 n->bar.cmbsz = 0; 7454 n->bar.cmbloc = 0; 7455 } 7456 7457 return; 7458 case NVME_REG_CMBMSC + 4: 7459 stl_le_p((uint8_t *)&n->bar.cmbmsc + 4, data); 7460 return; 7461 7462 case NVME_REG_PMRCAP: 7463 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly, 7464 "invalid write to PMRCAP register, ignored"); 7465 return; 7466 case NVME_REG_PMRCTL: 7467 if (!NVME_CAP_PMRS(cap)) { 7468 return; 7469 } 7470 7471 stl_le_p(&n->bar.pmrctl, data); 7472 if (NVME_PMRCTL_EN(data)) { 7473 memory_region_set_enabled(&n->pmr.dev->mr, true); 7474 pmrsts = 0; 7475 } else { 7476 memory_region_set_enabled(&n->pmr.dev->mr, false); 7477 NVME_PMRSTS_SET_NRDY(pmrsts, 1); 7478 n->pmr.cmse = false; 7479 } 7480 stl_le_p(&n->bar.pmrsts, pmrsts); 7481 return; 7482 case NVME_REG_PMRSTS: 7483 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly, 7484 "invalid write to PMRSTS register, ignored"); 7485 return; 7486 case NVME_REG_PMREBS: 7487 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly, 7488 "invalid write to PMREBS register, ignored"); 7489 return; 7490 
case NVME_REG_PMRSWTP: 7491 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly, 7492 "invalid write to PMRSWTP register, ignored"); 7493 return; 7494 case NVME_REG_PMRMSCL: 7495 if (!NVME_CAP_PMRS(cap)) { 7496 return; 7497 } 7498 7499 stl_le_p(&n->bar.pmrmscl, data); 7500 n->pmr.cmse = false; 7501 7502 if (NVME_PMRMSCL_CMSE(data)) { 7503 uint64_t pmrmscu = ldl_le_p(&n->bar.pmrmscu); 7504 hwaddr cba = pmrmscu << 32 | 7505 (NVME_PMRMSCL_CBA(data) << PMRMSCL_CBA_SHIFT); 7506 if (cba + int128_get64(n->pmr.dev->mr.size) < cba) { 7507 NVME_PMRSTS_SET_CBAI(pmrsts, 1); 7508 stl_le_p(&n->bar.pmrsts, pmrsts); 7509 return; 7510 } 7511 7512 n->pmr.cmse = true; 7513 n->pmr.cba = cba; 7514 } 7515 7516 return; 7517 case NVME_REG_PMRMSCU: 7518 if (!NVME_CAP_PMRS(cap)) { 7519 return; 7520 } 7521 7522 stl_le_p(&n->bar.pmrmscu, data); 7523 return; 7524 default: 7525 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid, 7526 "invalid MMIO write," 7527 " offset=0x%"PRIx64", data=%"PRIx64"", 7528 offset, data); 7529 break; 7530 } 7531 } 7532 7533 static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size) 7534 { 7535 NvmeCtrl *n = (NvmeCtrl *)opaque; 7536 uint8_t *ptr = (uint8_t *)&n->bar; 7537 7538 trace_pci_nvme_mmio_read(addr, size); 7539 7540 if (unlikely(addr & (sizeof(uint32_t) - 1))) { 7541 NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32, 7542 "MMIO read not 32-bit aligned," 7543 " offset=0x%"PRIx64"", addr); 7544 /* should RAZ, fall through for now */ 7545 } else if (unlikely(size < sizeof(uint32_t))) { 7546 NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall, 7547 "MMIO read smaller than 32-bits," 7548 " offset=0x%"PRIx64"", addr); 7549 /* should RAZ, fall through for now */ 7550 } 7551 7552 if (addr > sizeof(n->bar) - size) { 7553 NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs, 7554 "MMIO read beyond last register," 7555 " offset=0x%"PRIx64", returning 0", addr); 7556 7557 return 0; 7558 } 7559 7560 if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs && 7561 addr != NVME_REG_CSTS) { 7562 trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size); 7563 return 0; 7564 } 7565 7566 /* 7567 * When PMRWBM bit 1 is set, a read from 7568 * PMRSTS should ensure prior writes 7569 * made it to persistent media 7570 */ 7571 if (addr == NVME_REG_PMRSTS && 7572 (NVME_PMRCAP_PMRWBM(ldl_le_p(&n->bar.pmrcap)) & 0x02)) { 7573 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); 7574 } 7575 7576 return ldn_le_p(ptr + addr, size); 7577 } 7578 7579 static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) 7580 { 7581 PCIDevice *pci = PCI_DEVICE(n); 7582 uint32_t qid; 7583 7584 if (unlikely(addr & ((1 << 2) - 1))) { 7585 NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned, 7586 "doorbell write not 32-bit aligned," 7587 " offset=0x%"PRIx64", ignoring", addr); 7588 return; 7589 } 7590 7591 if (((addr - 0x1000) >> 2) & 1) { 7592 /* Completion queue doorbell write */ 7593 7594 uint16_t new_head = val & 0xffff; 7595 int start_sqs; 7596 NvmeCQueue *cq; 7597 7598 qid = (addr - (0x1000 + (1 << 2))) >> 3; 7599 if (unlikely(nvme_check_cqid(n, qid))) { 7600 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq, 7601 "completion queue doorbell write" 7602 " for nonexistent queue," 7603 " sqid=%"PRIu32", ignoring", qid); 7604 7605 /* 7606 * NVM Express v1.3d, Section 4.1 states: "If host software writes 7607 * an invalid value to the Submission Queue Tail Doorbell or 7608 * Completion Queue Head Doorbell register and an Asynchronous Event 7609 * Request command is outstanding, then an asynchronous event is 7610 * posted to the Admin Completion Queue
with a status code of 7611 * Invalid Doorbell Write Value." 7612 * 7613 * Also note that the spec includes the "Invalid Doorbell Register" 7614 * status code, but nowhere does it specify when to use it. 7615 * However, it seems reasonable to use it here in a similar 7616 * fashion. 7617 */ 7618 if (n->outstanding_aers) { 7619 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, 7620 NVME_AER_INFO_ERR_INVALID_DB_REGISTER, 7621 NVME_LOG_ERROR_INFO); 7622 } 7623 7624 return; 7625 } 7626 7627 cq = n->cq[qid]; 7628 if (unlikely(new_head >= cq->size)) { 7629 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead, 7630 "completion queue doorbell write value" 7631 " beyond queue size, sqid=%"PRIu32"," 7632 " new_head=%"PRIu16", ignoring", 7633 qid, new_head); 7634 7635 if (n->outstanding_aers) { 7636 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, 7637 NVME_AER_INFO_ERR_INVALID_DB_VALUE, 7638 NVME_LOG_ERROR_INFO); 7639 } 7640 7641 return; 7642 } 7643 7644 trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head); 7645 7646 start_sqs = nvme_cq_full(cq) ? 1 : 0; 7647 cq->head = new_head; 7648 if (!qid && n->dbbuf_enabled) { 7649 stl_le_pci_dma(pci, cq->db_addr, cq->head, MEMTXATTRS_UNSPECIFIED); 7650 } 7651 if (start_sqs) { 7652 NvmeSQueue *sq; 7653 QTAILQ_FOREACH(sq, &cq->sq_list, entry) { 7654 qemu_bh_schedule(sq->bh); 7655 } 7656 qemu_bh_schedule(cq->bh); 7657 } 7658 7659 if (cq->tail == cq->head) { 7660 if (cq->irq_enabled) { 7661 n->cq_pending--; 7662 } 7663 7664 nvme_irq_deassert(n, cq); 7665 } 7666 } else { 7667 /* Submission queue doorbell write */ 7668 7669 uint16_t new_tail = val & 0xffff; 7670 NvmeSQueue *sq; 7671 7672 qid = (addr - 0x1000) >> 3; 7673 if (unlikely(nvme_check_sqid(n, qid))) { 7674 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq, 7675 "submission queue doorbell write" 7676 " for nonexistent queue," 7677 " sqid=%"PRIu32", ignoring", qid); 7678 7679 if (n->outstanding_aers) { 7680 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, 7681 NVME_AER_INFO_ERR_INVALID_DB_REGISTER, 7682 NVME_LOG_ERROR_INFO); 7683 } 7684 7685 return; 7686 } 7687 7688 sq = n->sq[qid]; 7689 if (unlikely(new_tail >= sq->size)) { 7690 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail, 7691 "submission queue doorbell write value" 7692 " beyond queue size, sqid=%"PRIu32"," 7693 " new_tail=%"PRIu16", ignoring", 7694 qid, new_tail); 7695 7696 if (n->outstanding_aers) { 7697 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, 7698 NVME_AER_INFO_ERR_INVALID_DB_VALUE, 7699 NVME_LOG_ERROR_INFO); 7700 } 7701 7702 return; 7703 } 7704 7705 trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail); 7706 7707 sq->tail = new_tail; 7708 if (!qid && n->dbbuf_enabled) { 7709 /* 7710 * The spec states "the host shall also update the controller's 7711 * corresponding doorbell property to match the value of that entry 7712 * in the Shadow Doorbell buffer." 7713 * 7714 * Since this context is currently a VM trap, we can safely enforce 7715 * the requirement from the device side in case the host is 7716 * misbehaving. 7717 * 7718 * Note, we shouldn't have to do this, but various drivers 7719 * including ones that run on Linux, are not updating Admin Queues, 7720 * so we can't trust reading it for an appropriate sq tail. 
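 * The DMA write below therefore mirrors the MMIO doorbell value back into
 * the shadow buffer so that later nvme_update_sq_tail() reads observe it.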
7721 */ 7722 stl_le_pci_dma(pci, sq->db_addr, sq->tail, MEMTXATTRS_UNSPECIFIED); 7723 } 7724 7725 qemu_bh_schedule(sq->bh); 7726 } 7727 } 7728 7729 static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data, 7730 unsigned size) 7731 { 7732 NvmeCtrl *n = (NvmeCtrl *)opaque; 7733 7734 trace_pci_nvme_mmio_write(addr, data, size); 7735 7736 if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs && 7737 addr != NVME_REG_CSTS) { 7738 trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size); 7739 return; 7740 } 7741 7742 if (addr < sizeof(n->bar)) { 7743 nvme_write_bar(n, addr, data, size); 7744 } else { 7745 nvme_process_db(n, addr, data); 7746 } 7747 } 7748 7749 static const MemoryRegionOps nvme_mmio_ops = { 7750 .read = nvme_mmio_read, 7751 .write = nvme_mmio_write, 7752 .endianness = DEVICE_LITTLE_ENDIAN, 7753 .impl = { 7754 .min_access_size = 2, 7755 .max_access_size = 8, 7756 }, 7757 }; 7758 7759 static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data, 7760 unsigned size) 7761 { 7762 NvmeCtrl *n = (NvmeCtrl *)opaque; 7763 stn_le_p(&n->cmb.buf[addr], size, data); 7764 } 7765 7766 static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size) 7767 { 7768 NvmeCtrl *n = (NvmeCtrl *)opaque; 7769 return ldn_le_p(&n->cmb.buf[addr], size); 7770 } 7771 7772 static const MemoryRegionOps nvme_cmb_ops = { 7773 .read = nvme_cmb_read, 7774 .write = nvme_cmb_write, 7775 .endianness = DEVICE_LITTLE_ENDIAN, 7776 .impl = { 7777 .min_access_size = 1, 7778 .max_access_size = 8, 7779 }, 7780 }; 7781 7782 static bool nvme_check_params(NvmeCtrl *n, Error **errp) 7783 { 7784 NvmeParams *params = &n->params; 7785 7786 if (params->num_queues) { 7787 warn_report("num_queues is deprecated; please use max_ioqpairs " 7788 "instead"); 7789 7790 params->max_ioqpairs = params->num_queues - 1; 7791 } 7792 7793 if (n->namespace.blkconf.blk && n->subsys) { 7794 error_setg(errp, "subsystem support is unavailable with legacy " 7795 "namespace ('drive' property)"); 7796 return false; 7797 } 7798 7799 if (params->max_ioqpairs < 1 || 7800 params->max_ioqpairs > NVME_MAX_IOQPAIRS) { 7801 error_setg(errp, "max_ioqpairs must be between 1 and %d", 7802 NVME_MAX_IOQPAIRS); 7803 return false; 7804 } 7805 7806 if (params->msix_qsize < 1 || 7807 params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) { 7808 error_setg(errp, "msix_qsize must be between 1 and %d", 7809 PCI_MSIX_FLAGS_QSIZE + 1); 7810 return false; 7811 } 7812 7813 if (!params->serial) { 7814 error_setg(errp, "serial property not set"); 7815 return false; 7816 } 7817 7818 if (n->pmr.dev) { 7819 if (host_memory_backend_is_mapped(n->pmr.dev)) { 7820 error_setg(errp, "can't use already busy memdev: %s", 7821 object_get_canonical_path_component(OBJECT(n->pmr.dev))); 7822 return false; 7823 } 7824 7825 if (!is_power_of_2(n->pmr.dev->size)) { 7826 error_setg(errp, "pmr backend size needs to be power of 2 in size"); 7827 return false; 7828 } 7829 7830 host_memory_backend_set_mapped(n->pmr.dev, true); 7831 } 7832 7833 if (n->params.zasl > n->params.mdts) { 7834 error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less " 7835 "than or equal to mdts (Maximum Data Transfer Size)"); 7836 return false; 7837 } 7838 7839 if (!n->params.vsl) { 7840 error_setg(errp, "vsl must be non-zero"); 7841 return false; 7842 } 7843 7844 if (params->sriov_max_vfs) { 7845 if (!n->subsys) { 7846 error_setg(errp, "subsystem is required for the use of SR-IOV"); 7847 return false; 7848 } 7849 7850 if (params->sriov_max_vfs > NVME_MAX_VFS) { 7851 error_setg(errp, "sriov_max_vfs must 
be between 0 and %d", 7852 NVME_MAX_VFS); 7853 return false; 7854 } 7855 7856 if (params->cmb_size_mb) { 7857 error_setg(errp, "CMB is not supported with SR-IOV"); 7858 return false; 7859 } 7860 7861 if (n->pmr.dev) { 7862 error_setg(errp, "PMR is not supported with SR-IOV"); 7863 return false; 7864 } 7865 7866 if (!params->sriov_vq_flexible || !params->sriov_vi_flexible) { 7867 error_setg(errp, "both sriov_vq_flexible and sriov_vi_flexible" 7868 " must be set for the use of SR-IOV"); 7869 return false; 7870 } 7871 7872 if (params->sriov_vq_flexible < params->sriov_max_vfs * 2) { 7873 error_setg(errp, "sriov_vq_flexible must be greater than or equal" 7874 " to %d (sriov_max_vfs * 2)", params->sriov_max_vfs * 2); 7875 return false; 7876 } 7877 7878 if (params->max_ioqpairs < params->sriov_vq_flexible + 2) { 7879 error_setg(errp, "(max_ioqpairs - sriov_vq_flexible) must be" 7880 " greater than or equal to 2"); 7881 return false; 7882 } 7883 7884 if (params->sriov_vi_flexible < params->sriov_max_vfs) { 7885 error_setg(errp, "sriov_vi_flexible must be greater than or equal" 7886 " to %d (sriov_max_vfs)", params->sriov_max_vfs); 7887 return false; 7888 } 7889 7890 if (params->msix_qsize < params->sriov_vi_flexible + 1) { 7891 error_setg(errp, "(msix_qsize - sriov_vi_flexible) must be" 7892 " greater than or equal to 1"); 7893 return false; 7894 } 7895 7896 if (params->sriov_max_vi_per_vf && 7897 (params->sriov_max_vi_per_vf - 1) % NVME_VF_RES_GRANULARITY) { 7898 error_setg(errp, "sriov_max_vi_per_vf must meet:" 7899 " (sriov_max_vi_per_vf - 1) %% %d == 0 and" 7900 " sriov_max_vi_per_vf >= 1", NVME_VF_RES_GRANULARITY); 7901 return false; 7902 } 7903 7904 if (params->sriov_max_vq_per_vf && 7905 (params->sriov_max_vq_per_vf < 2 || 7906 (params->sriov_max_vq_per_vf - 1) % NVME_VF_RES_GRANULARITY)) { 7907 error_setg(errp, "sriov_max_vq_per_vf must meet:" 7908 " (sriov_max_vq_per_vf - 1) %% %d == 0 and" 7909 " sriov_max_vq_per_vf >= 2", NVME_VF_RES_GRANULARITY); 7910 return false; 7911 } 7912 } 7913 7914 return true; 7915 } 7916 7917 static void nvme_init_state(NvmeCtrl *n) 7918 { 7919 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; 7920 NvmeSecCtrlList *list = &n->sec_ctrl_list; 7921 NvmeSecCtrlEntry *sctrl; 7922 PCIDevice *pci = PCI_DEVICE(n); 7923 uint8_t max_vfs; 7924 int i; 7925 7926 if (pci_is_vf(pci)) { 7927 sctrl = nvme_sctrl(n); 7928 max_vfs = 0; 7929 n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; 7930 n->conf_msix_qsize = sctrl->nvi ? 
le16_to_cpu(sctrl->nvi) : 1; 7931 } else { 7932 max_vfs = n->params.sriov_max_vfs; 7933 n->conf_ioqpairs = n->params.max_ioqpairs; 7934 n->conf_msix_qsize = n->params.msix_qsize; 7935 } 7936 7937 n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1); 7938 n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1); 7939 n->temperature = NVME_TEMPERATURE; 7940 n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING; 7941 n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 7942 n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1); 7943 QTAILQ_INIT(&n->aer_queue); 7944 7945 list->numcntl = cpu_to_le16(max_vfs); 7946 for (i = 0; i < max_vfs; i++) { 7947 sctrl = &list->sec[i]; 7948 sctrl->pcid = cpu_to_le16(n->cntlid); 7949 sctrl->vfn = cpu_to_le16(i + 1); 7950 } 7951 7952 cap->cntlid = cpu_to_le16(n->cntlid); 7953 cap->crt = NVME_CRT_VQ | NVME_CRT_VI; 7954 7955 if (pci_is_vf(pci)) { 7956 cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs); 7957 } else { 7958 cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs - 7959 n->params.sriov_vq_flexible); 7960 cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible); 7961 cap->vqrfap = cap->vqfrt; 7962 cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY); 7963 cap->vqfrsm = n->params.sriov_max_vq_per_vf ? 7964 cpu_to_le16(n->params.sriov_max_vq_per_vf) : 7965 cap->vqfrt / MAX(max_vfs, 1); 7966 } 7967 7968 if (pci_is_vf(pci)) { 7969 cap->viprt = cpu_to_le16(n->conf_msix_qsize); 7970 } else { 7971 cap->viprt = cpu_to_le16(n->params.msix_qsize - 7972 n->params.sriov_vi_flexible); 7973 cap->vifrt = cpu_to_le32(n->params.sriov_vi_flexible); 7974 cap->virfap = cap->vifrt; 7975 cap->vigran = cpu_to_le16(NVME_VF_RES_GRANULARITY); 7976 cap->vifrsm = n->params.sriov_max_vi_per_vf ? 7977 cpu_to_le16(n->params.sriov_max_vi_per_vf) : 7978 cap->vifrt / MAX(max_vfs, 1); 7979 } 7980 } 7981 7982 static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev) 7983 { 7984 uint64_t cmb_size = n->params.cmb_size_mb * MiB; 7985 uint64_t cap = ldq_le_p(&n->bar.cap); 7986 7987 n->cmb.buf = g_malloc0(cmb_size); 7988 memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n, 7989 "nvme-cmb", cmb_size); 7990 pci_register_bar(pci_dev, NVME_CMB_BIR, 7991 PCI_BASE_ADDRESS_SPACE_MEMORY | 7992 PCI_BASE_ADDRESS_MEM_TYPE_64 | 7993 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem); 7994 7995 NVME_CAP_SET_CMBS(cap, 1); 7996 stq_le_p(&n->bar.cap, cap); 7997 7998 if (n->params.legacy_cmb) { 7999 nvme_cmb_enable_regs(n); 8000 n->cmb.cmse = true; 8001 } 8002 } 8003 8004 static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev) 8005 { 8006 uint32_t pmrcap = ldl_le_p(&n->bar.pmrcap); 8007 8008 NVME_PMRCAP_SET_RDS(pmrcap, 1); 8009 NVME_PMRCAP_SET_WDS(pmrcap, 1); 8010 NVME_PMRCAP_SET_BIR(pmrcap, NVME_PMR_BIR); 8011 /* Turn on bit 1 support */ 8012 NVME_PMRCAP_SET_PMRWBM(pmrcap, 0x02); 8013 NVME_PMRCAP_SET_CMSS(pmrcap, 1); 8014 stl_le_p(&n->bar.pmrcap, pmrcap); 8015 8016 pci_register_bar(pci_dev, NVME_PMR_BIR, 8017 PCI_BASE_ADDRESS_SPACE_MEMORY | 8018 PCI_BASE_ADDRESS_MEM_TYPE_64 | 8019 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr); 8020 8021 memory_region_set_enabled(&n->pmr.dev->mr, false); 8022 } 8023 8024 static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs, 8025 unsigned *msix_table_offset, 8026 unsigned *msix_pba_offset) 8027 { 8028 uint64_t bar_size, msix_table_size, msix_pba_size; 8029 8030 bar_size = sizeof(NvmeBar) + 2 * total_queues * NVME_DB_SIZE; 8031 bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); 8032 8033 if (msix_table_offset) { 8034 *msix_table_offset = bar_size; 8035 } 
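/*
 * The MSI-X table starts right after the (4 KiB aligned) doorbell region;
 * the PBA offset computed below is likewise 4 KiB aligned before the total
 * BAR size is rounded up to a power of two.
 */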
8036 8037 msix_table_size = PCI_MSIX_ENTRY_SIZE * total_irqs; 8038 bar_size += msix_table_size; 8039 bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); 8040 8041 if (msix_pba_offset) { 8042 *msix_pba_offset = bar_size; 8043 } 8044 8045 msix_pba_size = QEMU_ALIGN_UP(total_irqs, 64) / 8; 8046 bar_size += msix_pba_size; 8047 8048 bar_size = pow2ceil(bar_size); 8049 return bar_size; 8050 } 8051 8052 static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset) 8053 { 8054 uint16_t vf_dev_id = n->params.use_intel_id ? 8055 PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME; 8056 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; 8057 uint64_t bar_size = nvme_bar_size(le16_to_cpu(cap->vqfrsm), 8058 le16_to_cpu(cap->vifrsm), 8059 NULL, NULL); 8060 8061 pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id, 8062 n->params.sriov_max_vfs, n->params.sriov_max_vfs, 8063 NVME_VF_OFFSET, NVME_VF_STRIDE); 8064 8065 pcie_sriov_pf_init_vf_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | 8066 PCI_BASE_ADDRESS_MEM_TYPE_64, bar_size); 8067 } 8068 8069 static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset) 8070 { 8071 Error *err = NULL; 8072 int ret; 8073 8074 ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, offset, 8075 PCI_PM_SIZEOF, &err); 8076 if (err) { 8077 error_report_err(err); 8078 return ret; 8079 } 8080 8081 pci_set_word(pci_dev->config + offset + PCI_PM_PMC, 8082 PCI_PM_CAP_VER_1_2); 8083 pci_set_word(pci_dev->config + offset + PCI_PM_CTRL, 8084 PCI_PM_CTRL_NO_SOFT_RESET); 8085 pci_set_word(pci_dev->wmask + offset + PCI_PM_CTRL, 8086 PCI_PM_CTRL_STATE_MASK); 8087 8088 return 0; 8089 } 8090 8091 static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) 8092 { 8093 ERRP_GUARD(); 8094 uint8_t *pci_conf = pci_dev->config; 8095 uint64_t bar_size; 8096 unsigned msix_table_offset, msix_pba_offset; 8097 int ret; 8098 8099 pci_conf[PCI_INTERRUPT_PIN] = 1; 8100 pci_config_set_prog_interface(pci_conf, 0x2); 8101 8102 if (n->params.use_intel_id) { 8103 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); 8104 pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_NVME); 8105 } else { 8106 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT); 8107 pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME); 8108 } 8109 8110 pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS); 8111 nvme_add_pm_capability(pci_dev, 0x60); 8112 pcie_endpoint_cap_init(pci_dev, 0x80); 8113 pcie_cap_flr_init(pci_dev); 8114 if (n->params.sriov_max_vfs) { 8115 pcie_ari_init(pci_dev, 0x100); 8116 } 8117 8118 /* add one to max_ioqpairs to account for the admin queue pair */ 8119 bar_size = nvme_bar_size(n->params.max_ioqpairs + 1, n->params.msix_qsize, 8120 &msix_table_offset, &msix_pba_offset); 8121 8122 memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); 8123 memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", 8124 msix_table_offset); 8125 memory_region_add_subregion(&n->bar0, 0, &n->iomem); 8126 8127 if (pci_is_vf(pci_dev)) { 8128 pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); 8129 } else { 8130 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | 8131 PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); 8132 } 8133 ret = msix_init(pci_dev, n->params.msix_qsize, 8134 &n->bar0, 0, msix_table_offset, 8135 &n->bar0, 0, msix_pba_offset, 0, errp); 8136 if (ret == -ENOTSUP) { 8137 /* report that msix is not supported, but do not error out */ 8138 warn_report_err(*errp); 8139 *errp = NULL; 8140 } else if (ret < 0) { 8141 /* propagate error to caller */ 8142 return 
false; 8143 } 8144 8145 nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); 8146 8147 if (n->params.cmb_size_mb) { 8148 nvme_init_cmb(n, pci_dev); 8149 } 8150 8151 if (n->pmr.dev) { 8152 nvme_init_pmr(n, pci_dev); 8153 } 8154 8155 if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) { 8156 nvme_init_sriov(n, pci_dev, 0x120); 8157 } 8158 8159 return true; 8160 } 8161 8162 static void nvme_init_subnqn(NvmeCtrl *n) 8163 { 8164 NvmeSubsystem *subsys = n->subsys; 8165 NvmeIdCtrl *id = &n->id_ctrl; 8166 8167 if (!subsys) { 8168 snprintf((char *)id->subnqn, sizeof(id->subnqn), 8169 "nqn.2019-08.org.qemu:%s", n->params.serial); 8170 } else { 8171 pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn); 8172 } 8173 } 8174 8175 static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) 8176 { 8177 NvmeIdCtrl *id = &n->id_ctrl; 8178 uint8_t *pci_conf = pci_dev->config; 8179 uint64_t cap = ldq_le_p(&n->bar.cap); 8180 NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); 8181 uint32_t ctratt; 8182 8183 id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); 8184 id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); 8185 strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' '); 8186 strpadcpy((char *)id->fr, sizeof(id->fr), QEMU_VERSION, ' '); 8187 strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' '); 8188 8189 id->cntlid = cpu_to_le16(n->cntlid); 8190 8191 id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); 8192 ctratt = NVME_CTRATT_ELBAS; 8193 8194 id->rab = 6; 8195 8196 if (n->params.use_intel_id) { 8197 id->ieee[0] = 0xb3; 8198 id->ieee[1] = 0x02; 8199 id->ieee[2] = 0x00; 8200 } else { 8201 id->ieee[0] = 0x00; 8202 id->ieee[1] = 0x54; 8203 id->ieee[2] = 0x52; 8204 } 8205 8206 id->mdts = n->params.mdts; 8207 id->ver = cpu_to_le32(NVME_SPEC_VER); 8208 id->oacs = 8209 cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF | 8210 NVME_OACS_DIRECTIVES); 8211 id->cntrltype = 0x1; 8212 8213 /* 8214 * Because the controller always completes the Abort command immediately, 8215 * there can never be more than one concurrently executing Abort command, 8216 * so this value is never used for anything. Note that there can easily be 8217 * many Abort commands in the queues, but they are not considered 8218 * "executing" until processed by nvme_abort. 8219 * 8220 * The specification recommends a value of 3 for Abort Command Limit (four 8221 * concurrently outstanding Abort commands), so let's use that, though it is 8222 * inconsequential. 8223 */ 8224 id->acl = 3; 8225 id->aerl = n->params.aerl; 8226 id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO; 8227 id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED; 8228 8229 /* recommended default value (~70 C) */ 8230 id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING); 8231 id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL); 8232 8233 id->sqes = (0x6 << 4) | 0x6; 8234 id->cqes = (0x4 << 4) | 0x4; 8235 id->nn = cpu_to_le32(NVME_MAX_NAMESPACES); 8236 id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP | 8237 NVME_ONCS_FEATURES | NVME_ONCS_DSM | 8238 NVME_ONCS_COMPARE | NVME_ONCS_COPY); 8239 8240 /* 8241 * NOTE: If this device ever supports a command set that does NOT use 0x0 8242 * as a Flush-equivalent operation, support for the broadcast NSID in Flush 8243 * should probably be removed. 8244 * 8245 * See comment in nvme_io_cmd.
8246 */ 8247 id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT; 8248 8249 id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1); 8250 id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN); 8251 8252 nvme_init_subnqn(n); 8253 8254 id->psd[0].mp = cpu_to_le16(0x9c4); 8255 id->psd[0].enlat = cpu_to_le32(0x10); 8256 id->psd[0].exlat = cpu_to_le32(0x4); 8257 8258 if (n->subsys) { 8259 id->cmic |= NVME_CMIC_MULTI_CTRL; 8260 ctratt |= NVME_CTRATT_ENDGRPS; 8261 8262 id->endgidmax = cpu_to_le16(0x1); 8263 8264 if (n->subsys->endgrp.fdp.enabled) { 8265 ctratt |= NVME_CTRATT_FDPS; 8266 } 8267 } 8268 8269 id->ctratt = cpu_to_le32(ctratt); 8270 8271 NVME_CAP_SET_MQES(cap, 0x7ff); 8272 NVME_CAP_SET_CQR(cap, 1); 8273 NVME_CAP_SET_TO(cap, 0xf); 8274 NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM); 8275 NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_CSI_SUPP); 8276 NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_ADMIN_ONLY); 8277 NVME_CAP_SET_MPSMAX(cap, 4); 8278 NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0); 8279 NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0); 8280 stq_le_p(&n->bar.cap, cap); 8281 8282 stl_le_p(&n->bar.vs, NVME_SPEC_VER); 8283 n->bar.intmc = n->bar.intms = 0; 8284 8285 if (pci_is_vf(pci_dev) && !sctrl->scs) { 8286 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); 8287 } 8288 } 8289 8290 static int nvme_init_subsys(NvmeCtrl *n, Error **errp) 8291 { 8292 int cntlid; 8293 8294 if (!n->subsys) { 8295 return 0; 8296 } 8297 8298 cntlid = nvme_subsys_register_ctrl(n, errp); 8299 if (cntlid < 0) { 8300 return -1; 8301 } 8302 8303 n->cntlid = cntlid; 8304 8305 return 0; 8306 } 8307 8308 void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns) 8309 { 8310 uint32_t nsid = ns->params.nsid; 8311 assert(nsid && nsid <= NVME_MAX_NAMESPACES); 8312 8313 n->namespaces[nsid] = ns; 8314 ns->attached++; 8315 8316 n->dmrsl = MIN_NON_ZERO(n->dmrsl, 8317 BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); 8318 } 8319 8320 static void nvme_realize(PCIDevice *pci_dev, Error **errp) 8321 { 8322 NvmeCtrl *n = NVME(pci_dev); 8323 DeviceState *dev = DEVICE(pci_dev); 8324 NvmeNamespace *ns; 8325 NvmeCtrl *pn = NVME(pcie_sriov_get_pf(pci_dev)); 8326 8327 if (pci_is_vf(pci_dev)) { 8328 /* 8329 * VFs derive settings from the parent. PF's lifespan exceeds 8330 * that of VF's, so it's safe to share params.serial. 
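 * The parameter struct is copied by value below, while the subsystem
 * pointer is shared by reference with the parent PF.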
8331 */ 8332 memcpy(&n->params, &pn->params, sizeof(NvmeParams)); 8333 n->subsys = pn->subsys; 8334 } 8335 8336 if (!nvme_check_params(n, errp)) { 8337 return; 8338 } 8339 8340 qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id); 8341 8342 if (nvme_init_subsys(n, errp)) { 8343 return; 8344 } 8345 nvme_init_state(n); 8346 if (!nvme_init_pci(n, pci_dev, errp)) { 8347 return; 8348 } 8349 nvme_init_ctrl(n, pci_dev); 8350 8351 /* setup a namespace if the controller drive property was given */ 8352 if (n->namespace.blkconf.blk) { 8353 ns = &n->namespace; 8354 ns->params.nsid = 1; 8355 8356 if (nvme_ns_setup(ns, errp)) { 8357 return; 8358 } 8359 8360 nvme_attach_ns(n, ns); 8361 } 8362 } 8363 8364 static void nvme_exit(PCIDevice *pci_dev) 8365 { 8366 NvmeCtrl *n = NVME(pci_dev); 8367 NvmeNamespace *ns; 8368 int i; 8369 8370 nvme_ctrl_reset(n, NVME_RESET_FUNCTION); 8371 8372 if (n->subsys) { 8373 for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { 8374 ns = nvme_ns(n, i); 8375 if (ns) { 8376 ns->attached--; 8377 } 8378 } 8379 8380 nvme_subsys_unregister_ctrl(n->subsys, n); 8381 } 8382 8383 g_free(n->cq); 8384 g_free(n->sq); 8385 g_free(n->aer_reqs); 8386 8387 if (n->params.cmb_size_mb) { 8388 g_free(n->cmb.buf); 8389 } 8390 8391 if (n->pmr.dev) { 8392 host_memory_backend_set_mapped(n->pmr.dev, false); 8393 } 8394 8395 if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) { 8396 pcie_sriov_pf_exit(pci_dev); 8397 } 8398 8399 msix_uninit(pci_dev, &n->bar0, &n->bar0); 8400 memory_region_del_subregion(&n->bar0, &n->iomem); 8401 } 8402 8403 static Property nvme_props[] = { 8404 DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf), 8405 DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND, 8406 HostMemoryBackend *), 8407 DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS, 8408 NvmeSubsystem *), 8409 DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial), 8410 DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0), 8411 DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0), 8412 DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64), 8413 DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65), 8414 DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3), 8415 DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64), 8416 DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7), 8417 DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7), 8418 DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false), 8419 DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false), 8420 DEFINE_PROP_BOOL("ioeventfd", NvmeCtrl, params.ioeventfd, false), 8421 DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0), 8422 DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl, 8423 params.auto_transition_zones, true), 8424 DEFINE_PROP_UINT8("sriov_max_vfs", NvmeCtrl, params.sriov_max_vfs, 0), 8425 DEFINE_PROP_UINT16("sriov_vq_flexible", NvmeCtrl, 8426 params.sriov_vq_flexible, 0), 8427 DEFINE_PROP_UINT16("sriov_vi_flexible", NvmeCtrl, 8428 params.sriov_vi_flexible, 0), 8429 DEFINE_PROP_UINT8("sriov_max_vi_per_vf", NvmeCtrl, 8430 params.sriov_max_vi_per_vf, 0), 8431 DEFINE_PROP_UINT8("sriov_max_vq_per_vf", NvmeCtrl, 8432 params.sriov_max_vq_per_vf, 0), 8433 DEFINE_PROP_END_OF_LIST(), 8434 }; 8435 8436 static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name, 8437 void *opaque, Error **errp) 8438 { 8439 NvmeCtrl *n = NVME(obj); 8440 uint8_t value = n->smart_critical_warning; 8441 8442 visit_type_uint8(v, name, 
static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value = n->smart_critical_warning;

    visit_type_uint8(v, name, &value, errp);
}

static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value, old_value, cap = 0, index, event;

    if (!visit_type_uint8(v, name, &value, errp)) {
        return;
    }

    cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY
          | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA;
    if (NVME_CAP_PMRS(ldq_le_p(&n->bar.cap))) {
        cap |= NVME_SMART_PMR_UNRELIABLE;
    }

    if ((value & cap) != value) {
        error_setg(errp, "unsupported smart critical warning bits: 0x%x",
                   value & ~cap);
        return;
    }

    old_value = n->smart_critical_warning;
    n->smart_critical_warning = value;

    /* only inject new bits of smart critical warning */
    for (index = 0; index < NVME_SMART_WARN_MAX; index++) {
        event = 1 << index;
        if (value & ~old_value & event) {
            nvme_smart_event(n, event);
        }
    }
}

static void nvme_pci_reset(DeviceState *qdev)
{
    PCIDevice *pci_dev = PCI_DEVICE(qdev);
    NvmeCtrl *n = NVME(pci_dev);

    trace_pci_nvme_pci_reset();
    nvme_ctrl_reset(n, NVME_RESET_FUNCTION);
}
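
/*
 * PCI config space write hook for the SR-IOV control register: when the PF
 * clears PCI_SRIOV_CTRL_VFE, every currently enabled VF (secondary
 * controller) is taken offline via nvme_virt_set_state() before the write
 * is applied by pci_default_write_config().
 */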
static void nvme_sriov_pre_write_ctrl(PCIDevice *dev, uint32_t address,
                                      uint32_t val, int len)
{
    NvmeCtrl *n = NVME(dev);
    NvmeSecCtrlEntry *sctrl;
    uint16_t sriov_cap = dev->exp.sriov_cap;
    uint32_t off = address - sriov_cap;
    int i, num_vfs;

    if (!sriov_cap) {
        return;
    }

    if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) {
        if (!(val & PCI_SRIOV_CTRL_VFE)) {
            num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF);
            for (i = 0; i < num_vfs; i++) {
                sctrl = &n->sec_ctrl_list.sec[i];
                nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false);
            }
        }
    }
}

static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
                                  uint32_t val, int len)
{
    nvme_sriov_pre_write_ctrl(dev, address, val, len);
    pci_default_write_config(dev, address, val, len);
    pcie_cap_flr_write_config(dev, address, val, len);
}

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->config_write = nvme_pci_write_config;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
    dc->reset = nvme_pci_reset;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *n = NVME(obj);

    device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj));

    object_property_add(obj, "smart_critical_warning", "uint8",
                        nvme_get_smart_warning,
                        nvme_set_smart_warning, NULL, NULL);
}

static const TypeInfo nvme_info = {
    .name = TYPE_NVME,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .instance_init = nvme_instance_init,
    .class_init = nvme_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static const TypeInfo nvme_bus_info = {
    .name = TYPE_NVME_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(NvmeBus),
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
    type_register_static(&nvme_bus_info);
}

type_init(nvme_register_types)