// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}
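
/*
 * Aggregate I/O statistics across every block-device backed namespace in
 * the subsystem. Note the SMART log reports "data units" in thousands of
 * 512-byte sectors, rounded up, which is why the per-device sector counts
 * are divided by 1000 with DIV_ROUND_UP here and above.
 */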
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page] =
	log->acs[nvme_admin_identify] =
	log->acs[nvme_admin_abort_cmd] =
	log->acs[nvme_admin_set_features] =
	log->acs[nvme_admin_get_features] =
	log->acs[nvme_admin_async_event] =
	log->acs[nvme_admin_keep_alive] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);

	log->iocs[nvme_cmd_read] =
	log->iocs[nvme_cmd_flush] =
	log->iocs[nvme_cmd_dsm] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
	log->iocs[nvme_cmd_write] =
	log->iocs[nvme_cmd_write_zeroes] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] =
	log->iocs[nvme_cmd_zone_mgmt_send] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_zone_mgmt_recv] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}
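
/*
 * The Changed Namespace List log is destructive to read: returning it
 * clears the accumulated list and re-arms the namespace attribute AEN.
 * If the list overflowed (nr_changed_ns == U32_MAX, saturated when more
 * namespaces changed than the list can hold), only the first entry, a
 * sentinel value, is reported; the rest of the page is zeroed.
 */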
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}
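
/*
 * ANA log layout: a header carrying the change count and number of group
 * descriptors, followed by one variable-length descriptor per enabled ANA
 * group. Descriptors are streamed to the SGL as they are formatted; the
 * header is written last, once ngrps is known. If the host set the RGO
 * bit in lsp, the descriptors carry no NSIDs.
 */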
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
			GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}
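
/*
 * Every log page read is validated against the NUMD-derived transfer
 * length first (see nvmet_get_log_page_len()). NUMD is a 0's based dword
 * count split across NUMDU/NUMDL; e.g. reading the 512-byte SMART log
 * means 128 dwords, so the host sets NUMDL = 127 and NUMDU = 0.
 */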
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which is always
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
			req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
			strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
			subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');

	put_unaligned_le24(subsys->ieee_oui, id->ieee);

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands. But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* max/min queue entry sizes as log2: 64-byte SQEs, 16-byte CQEs */
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than a LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state. Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
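
/*
 * Identify Namespace (CNS 0x00). Only a single LBA format is exposed and
 * lbaf[0].ds holds the log2 of the LBA size, e.g. ds = 9 for 512-byte
 * blocks or ds = 12 for 4K blocks. nsze/ncap are derived from the backing
 * device size; nuse is only reported when the port's ANA state makes the
 * namespace reachable.
 */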
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared. Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
		void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
{
	switch (req->cmd->identify.csi) {
	case NVME_CSI_NVM:
		nvmet_execute_identify_desclist(req);
		return true;
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			nvmet_execute_identify_desclist(req);
			return true;
		}
		return false;
	default:
		return false;
	}
}
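
/*
 * Identify dispatch, keyed on CNS and, for I/O command set specific data
 * structures, CSI: 0x00 namespace, 0x01 controller, 0x02 active namespace
 * ID list, 0x03 namespace descriptor list, plus the ZNS variants of the
 * namespace/controller structures when zoned support is built in. Any
 * unhandled combination completes with a CNS field error.
 */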
static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ns(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_CS_NS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ns(req);
			default:
				break;
			}
		}
		break;
	case NVME_ID_CNS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ctrl(req);
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ctrl(req);
			default:
				break;
			}
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_nslist(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_NS_DESC_LIST:
		if (nvmet_handle_identify_desclist(req))
			return;
		break;
	}

	nvmet_req_cns_error_complete(req);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work. We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	/* KATO is passed in milliseconds but stored in seconds */
	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}
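
/*
 * Set Features: the feature identifier lives in cdw10[7:0]. For
 * NVME_FEAT_NUM_QUEUES the completion dword packs the 0's based number of
 * completion queues allocated into bits 31:16 and the 0's based number of
 * submission queues into bits 15:0, both fixed at subsys->max_qid - 1
 * here; a requested value of 0xffff in either field is rejected.
 */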
void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}
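
/*
 * KATO is stored internally in seconds but reported to the host in
 * milliseconds, mirroring the conversion done in nvmet_set_feat_kato().
 */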
void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them. We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}
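
/*
 * Admin command parsing order matters: fabrics command capsules are
 * handled first, then authentication status is enforced, discovery
 * subsystems get their restricted command set, the controller state is
 * checked, and passthru requests are forwarded before falling through to
 * the local opcode table below.
 */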
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_admin_cmd(req);
	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}