1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVM Express device driver 4 * Copyright (c) 2011-2014, Intel Corporation. 5 */ 6 7 #include <linux/blkdev.h> 8 #include <linux/blk-mq.h> 9 #include <linux/blk-integrity.h> 10 #include <linux/compat.h> 11 #include <linux/delay.h> 12 #include <linux/errno.h> 13 #include <linux/hdreg.h> 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/backing-dev.h> 17 #include <linux/slab.h> 18 #include <linux/types.h> 19 #include <linux/pr.h> 20 #include <linux/ptrace.h> 21 #include <linux/nvme_ioctl.h> 22 #include <linux/pm_qos.h> 23 #include <asm/unaligned.h> 24 25 #include "nvme.h" 26 #include "fabrics.h" 27 #include <linux/nvme-auth.h> 28 29 #define CREATE_TRACE_POINTS 30 #include "trace.h" 31 32 #define NVME_MINORS (1U << MINORBITS) 33 34 struct nvme_ns_info { 35 struct nvme_ns_ids ids; 36 u32 nsid; 37 __le32 anagrpid; 38 bool is_shared; 39 bool is_readonly; 40 bool is_ready; 41 }; 42 43 unsigned int admin_timeout = 60; 44 module_param(admin_timeout, uint, 0644); 45 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands"); 46 EXPORT_SYMBOL_GPL(admin_timeout); 47 48 unsigned int nvme_io_timeout = 30; 49 module_param_named(io_timeout, nvme_io_timeout, uint, 0644); 50 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); 51 EXPORT_SYMBOL_GPL(nvme_io_timeout); 52 53 static unsigned char shutdown_timeout = 5; 54 module_param(shutdown_timeout, byte, 0644); 55 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); 56 57 static u8 nvme_max_retries = 5; 58 module_param_named(max_retries, nvme_max_retries, byte, 0644); 59 MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); 60 61 static unsigned long default_ps_max_latency_us = 100000; 62 module_param(default_ps_max_latency_us, ulong, 0644); 63 MODULE_PARM_DESC(default_ps_max_latency_us, 64 "max power saving latency for new devices; use PM QOS to change per device"); 65 66 static bool force_apst; 67 module_param(force_apst, bool, 0644); 68 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); 69 70 static unsigned long apst_primary_timeout_ms = 100; 71 module_param(apst_primary_timeout_ms, ulong, 0644); 72 MODULE_PARM_DESC(apst_primary_timeout_ms, 73 "primary APST timeout in ms"); 74 75 static unsigned long apst_secondary_timeout_ms = 2000; 76 module_param(apst_secondary_timeout_ms, ulong, 0644); 77 MODULE_PARM_DESC(apst_secondary_timeout_ms, 78 "secondary APST timeout in ms"); 79 80 static unsigned long apst_primary_latency_tol_us = 15000; 81 module_param(apst_primary_latency_tol_us, ulong, 0644); 82 MODULE_PARM_DESC(apst_primary_latency_tol_us, 83 "primary APST latency tolerance in us"); 84 85 static unsigned long apst_secondary_latency_tol_us = 100000; 86 module_param(apst_secondary_latency_tol_us, ulong, 0644); 87 MODULE_PARM_DESC(apst_secondary_latency_tol_us, 88 "secondary APST latency tolerance in us"); 89 90 /* 91 * nvme_wq - hosts nvme related works that are not reset or delete 92 * nvme_reset_wq - hosts nvme reset works 93 * nvme_delete_wq - hosts nvme delete works 94 * 95 * nvme_wq will host works such as scan, aen handling, fw activation, 96 * keep-alive, periodic reconnects etc. nvme_reset_wq 97 * runs reset works which also flush works hosted on nvme_wq for 98 * serialization purposes. nvme_delete_wq host controller deletion 99 * works which flush reset works for serialization. 100 */ 101 struct workqueue_struct *nvme_wq; 102 EXPORT_SYMBOL_GPL(nvme_wq); 103 104 struct workqueue_struct *nvme_reset_wq; 105 EXPORT_SYMBOL_GPL(nvme_reset_wq); 106 107 struct workqueue_struct *nvme_delete_wq; 108 EXPORT_SYMBOL_GPL(nvme_delete_wq); 109 110 static LIST_HEAD(nvme_subsystems); 111 static DEFINE_MUTEX(nvme_subsystems_lock); 112 113 static DEFINE_IDA(nvme_instance_ida); 114 static dev_t nvme_ctrl_base_chr_devt; 115 static struct class *nvme_class; 116 static struct class *nvme_subsys_class; 117 118 static DEFINE_IDA(nvme_ns_chr_minor_ida); 119 static dev_t nvme_ns_chr_devt; 120 static struct class *nvme_ns_chr_class; 121 122 static void nvme_put_subsystem(struct nvme_subsystem *subsys); 123 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 124 unsigned nsid); 125 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, 126 struct nvme_command *cmd); 127 128 void nvme_queue_scan(struct nvme_ctrl *ctrl) 129 { 130 /* 131 * Only new queue scan work when admin and IO queues are both alive 132 */ 133 if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset) 134 queue_work(nvme_wq, &ctrl->scan_work); 135 } 136 137 /* 138 * Use this function to proceed with scheduling reset_work for a controller 139 * that had previously been set to the resetting state. This is intended for 140 * code paths that can't be interrupted by other reset attempts. A hot removal 141 * may prevent this from succeeding. 142 */ 143 int nvme_try_sched_reset(struct nvme_ctrl *ctrl) 144 { 145 if (ctrl->state != NVME_CTRL_RESETTING) 146 return -EBUSY; 147 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) 148 return -EBUSY; 149 return 0; 150 } 151 EXPORT_SYMBOL_GPL(nvme_try_sched_reset); 152 153 static void nvme_failfast_work(struct work_struct *work) 154 { 155 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 156 struct nvme_ctrl, failfast_work); 157 158 if (ctrl->state != NVME_CTRL_CONNECTING) 159 return; 160 161 set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 162 dev_info(ctrl->device, "failfast expired\n"); 163 nvme_kick_requeue_lists(ctrl); 164 } 165 166 static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl) 167 { 168 if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1) 169 return; 170 171 schedule_delayed_work(&ctrl->failfast_work, 172 ctrl->opts->fast_io_fail_tmo * HZ); 173 } 174 175 static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl) 176 { 177 if (!ctrl->opts) 178 return; 179 180 cancel_delayed_work_sync(&ctrl->failfast_work); 181 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 182 } 183 184 185 int nvme_reset_ctrl(struct nvme_ctrl *ctrl) 186 { 187 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 188 return -EBUSY; 189 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) 190 return -EBUSY; 191 return 0; 192 } 193 EXPORT_SYMBOL_GPL(nvme_reset_ctrl); 194 195 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) 196 { 197 int ret; 198 199 ret = nvme_reset_ctrl(ctrl); 200 if (!ret) { 201 flush_work(&ctrl->reset_work); 202 if (ctrl->state != NVME_CTRL_LIVE) 203 ret = -ENETRESET; 204 } 205 206 return ret; 207 } 208 209 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) 210 { 211 dev_info(ctrl->device, 212 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl)); 213 214 flush_work(&ctrl->reset_work); 215 nvme_stop_ctrl(ctrl); 216 nvme_remove_namespaces(ctrl); 217 ctrl->ops->delete_ctrl(ctrl); 218 nvme_uninit_ctrl(ctrl); 219 } 220 221 static void nvme_delete_ctrl_work(struct work_struct *work) 222 { 223 struct nvme_ctrl *ctrl = 224 container_of(work, struct nvme_ctrl, delete_work); 225 226 nvme_do_delete_ctrl(ctrl); 227 } 228 229 int nvme_delete_ctrl(struct nvme_ctrl *ctrl) 230 { 231 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 232 return -EBUSY; 233 if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) 234 return -EBUSY; 235 return 0; 236 } 237 EXPORT_SYMBOL_GPL(nvme_delete_ctrl); 238 239 static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) 240 { 241 /* 242 * Keep a reference until nvme_do_delete_ctrl() complete, 243 * since ->delete_ctrl can free the controller. 244 */ 245 nvme_get_ctrl(ctrl); 246 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 247 nvme_do_delete_ctrl(ctrl); 248 nvme_put_ctrl(ctrl); 249 } 250 251 static blk_status_t nvme_error_status(u16 status) 252 { 253 switch (status & 0x7ff) { 254 case NVME_SC_SUCCESS: 255 return BLK_STS_OK; 256 case NVME_SC_CAP_EXCEEDED: 257 return BLK_STS_NOSPC; 258 case NVME_SC_LBA_RANGE: 259 case NVME_SC_CMD_INTERRUPTED: 260 case NVME_SC_NS_NOT_READY: 261 return BLK_STS_TARGET; 262 case NVME_SC_BAD_ATTRIBUTES: 263 case NVME_SC_ONCS_NOT_SUPPORTED: 264 case NVME_SC_INVALID_OPCODE: 265 case NVME_SC_INVALID_FIELD: 266 case NVME_SC_INVALID_NS: 267 return BLK_STS_NOTSUPP; 268 case NVME_SC_WRITE_FAULT: 269 case NVME_SC_READ_ERROR: 270 case NVME_SC_UNWRITTEN_BLOCK: 271 case NVME_SC_ACCESS_DENIED: 272 case NVME_SC_READ_ONLY: 273 case NVME_SC_COMPARE_FAILED: 274 return BLK_STS_MEDIUM; 275 case NVME_SC_GUARD_CHECK: 276 case NVME_SC_APPTAG_CHECK: 277 case NVME_SC_REFTAG_CHECK: 278 case NVME_SC_INVALID_PI: 279 return BLK_STS_PROTECTION; 280 case NVME_SC_RESERVATION_CONFLICT: 281 return BLK_STS_NEXUS; 282 case NVME_SC_HOST_PATH_ERROR: 283 return BLK_STS_TRANSPORT; 284 case NVME_SC_ZONE_TOO_MANY_ACTIVE: 285 return BLK_STS_ZONE_ACTIVE_RESOURCE; 286 case NVME_SC_ZONE_TOO_MANY_OPEN: 287 return BLK_STS_ZONE_OPEN_RESOURCE; 288 default: 289 return BLK_STS_IOERR; 290 } 291 } 292 293 static void nvme_retry_req(struct request *req) 294 { 295 unsigned long delay = 0; 296 u16 crd; 297 298 /* The mask and shift result must be <= 3 */ 299 crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; 300 if (crd) 301 delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; 302 303 nvme_req(req)->retries++; 304 blk_mq_requeue_request(req, false); 305 blk_mq_delay_kick_requeue_list(req->q, delay); 306 } 307 308 static void nvme_log_error(struct request *req) 309 { 310 struct nvme_ns *ns = req->q->queuedata; 311 struct nvme_request *nr = nvme_req(req); 312 313 if (ns) { 314 pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n", 315 ns->disk ? ns->disk->disk_name : "?", 316 nvme_get_opcode_str(nr->cmd->common.opcode), 317 nr->cmd->common.opcode, 318 (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)), 319 (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift, 320 nvme_get_error_status_str(nr->status), 321 nr->status >> 8 & 7, /* Status Code Type */ 322 nr->status & 0xff, /* Status Code */ 323 nr->status & NVME_SC_MORE ? "MORE " : "", 324 nr->status & NVME_SC_DNR ? "DNR " : ""); 325 return; 326 } 327 328 pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n", 329 dev_name(nr->ctrl->device), 330 nvme_get_admin_opcode_str(nr->cmd->common.opcode), 331 nr->cmd->common.opcode, 332 nvme_get_error_status_str(nr->status), 333 nr->status >> 8 & 7, /* Status Code Type */ 334 nr->status & 0xff, /* Status Code */ 335 nr->status & NVME_SC_MORE ? "MORE " : "", 336 nr->status & NVME_SC_DNR ? "DNR " : ""); 337 } 338 339 enum nvme_disposition { 340 COMPLETE, 341 RETRY, 342 FAILOVER, 343 AUTHENTICATE, 344 }; 345 346 static inline enum nvme_disposition nvme_decide_disposition(struct request *req) 347 { 348 if (likely(nvme_req(req)->status == 0)) 349 return COMPLETE; 350 351 if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) 352 return AUTHENTICATE; 353 354 if (blk_noretry_request(req) || 355 (nvme_req(req)->status & NVME_SC_DNR) || 356 nvme_req(req)->retries >= nvme_max_retries) 357 return COMPLETE; 358 359 if (req->cmd_flags & REQ_NVME_MPATH) { 360 if (nvme_is_path_error(nvme_req(req)->status) || 361 blk_queue_dying(req->q)) 362 return FAILOVER; 363 } else { 364 if (blk_queue_dying(req->q)) 365 return COMPLETE; 366 } 367 368 return RETRY; 369 } 370 371 static inline void nvme_end_req_zoned(struct request *req) 372 { 373 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 374 req_op(req) == REQ_OP_ZONE_APPEND) 375 req->__sector = nvme_lba_to_sect(req->q->queuedata, 376 le64_to_cpu(nvme_req(req)->result.u64)); 377 } 378 379 static inline void nvme_end_req(struct request *req) 380 { 381 blk_status_t status = nvme_error_status(nvme_req(req)->status); 382 383 if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) 384 nvme_log_error(req); 385 nvme_end_req_zoned(req); 386 nvme_trace_bio_complete(req); 387 if (req->cmd_flags & REQ_NVME_MPATH) 388 nvme_mpath_end_request(req); 389 blk_mq_end_request(req, status); 390 } 391 392 void nvme_complete_rq(struct request *req) 393 { 394 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 395 396 trace_nvme_complete_rq(req); 397 nvme_cleanup_cmd(req); 398 399 if (ctrl->kas) 400 ctrl->comp_seen = true; 401 402 switch (nvme_decide_disposition(req)) { 403 case COMPLETE: 404 nvme_end_req(req); 405 return; 406 case RETRY: 407 nvme_retry_req(req); 408 return; 409 case FAILOVER: 410 nvme_failover_req(req); 411 return; 412 case AUTHENTICATE: 413 #ifdef CONFIG_NVME_AUTH 414 queue_work(nvme_wq, &ctrl->dhchap_auth_work); 415 nvme_retry_req(req); 416 #else 417 nvme_end_req(req); 418 #endif 419 return; 420 } 421 } 422 EXPORT_SYMBOL_GPL(nvme_complete_rq); 423 424 void nvme_complete_batch_req(struct request *req) 425 { 426 trace_nvme_complete_rq(req); 427 nvme_cleanup_cmd(req); 428 nvme_end_req_zoned(req); 429 } 430 EXPORT_SYMBOL_GPL(nvme_complete_batch_req); 431 432 /* 433 * Called to unwind from ->queue_rq on a failed command submission so that the 434 * multipathing code gets called to potentially failover to another path. 435 * The caller needs to unwind all transport specific resource allocations and 436 * must return propagate the return value. 437 */ 438 blk_status_t nvme_host_path_error(struct request *req) 439 { 440 nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; 441 blk_mq_set_request_complete(req); 442 nvme_complete_rq(req); 443 return BLK_STS_OK; 444 } 445 EXPORT_SYMBOL_GPL(nvme_host_path_error); 446 447 bool nvme_cancel_request(struct request *req, void *data) 448 { 449 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, 450 "Cancelling I/O %d", req->tag); 451 452 /* don't abort one completed request */ 453 if (blk_mq_request_completed(req)) 454 return true; 455 456 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; 457 nvme_req(req)->flags |= NVME_REQ_CANCELLED; 458 blk_mq_complete_request(req); 459 return true; 460 } 461 EXPORT_SYMBOL_GPL(nvme_cancel_request); 462 463 void nvme_cancel_tagset(struct nvme_ctrl *ctrl) 464 { 465 if (ctrl->tagset) { 466 blk_mq_tagset_busy_iter(ctrl->tagset, 467 nvme_cancel_request, ctrl); 468 blk_mq_tagset_wait_completed_request(ctrl->tagset); 469 } 470 } 471 EXPORT_SYMBOL_GPL(nvme_cancel_tagset); 472 473 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) 474 { 475 if (ctrl->admin_tagset) { 476 blk_mq_tagset_busy_iter(ctrl->admin_tagset, 477 nvme_cancel_request, ctrl); 478 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); 479 } 480 } 481 EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); 482 483 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 484 enum nvme_ctrl_state new_state) 485 { 486 enum nvme_ctrl_state old_state; 487 unsigned long flags; 488 bool changed = false; 489 490 spin_lock_irqsave(&ctrl->lock, flags); 491 492 old_state = ctrl->state; 493 switch (new_state) { 494 case NVME_CTRL_LIVE: 495 switch (old_state) { 496 case NVME_CTRL_NEW: 497 case NVME_CTRL_RESETTING: 498 case NVME_CTRL_CONNECTING: 499 changed = true; 500 fallthrough; 501 default: 502 break; 503 } 504 break; 505 case NVME_CTRL_RESETTING: 506 switch (old_state) { 507 case NVME_CTRL_NEW: 508 case NVME_CTRL_LIVE: 509 changed = true; 510 fallthrough; 511 default: 512 break; 513 } 514 break; 515 case NVME_CTRL_CONNECTING: 516 switch (old_state) { 517 case NVME_CTRL_NEW: 518 case NVME_CTRL_RESETTING: 519 changed = true; 520 fallthrough; 521 default: 522 break; 523 } 524 break; 525 case NVME_CTRL_DELETING: 526 switch (old_state) { 527 case NVME_CTRL_LIVE: 528 case NVME_CTRL_RESETTING: 529 case NVME_CTRL_CONNECTING: 530 changed = true; 531 fallthrough; 532 default: 533 break; 534 } 535 break; 536 case NVME_CTRL_DELETING_NOIO: 537 switch (old_state) { 538 case NVME_CTRL_DELETING: 539 case NVME_CTRL_DEAD: 540 changed = true; 541 fallthrough; 542 default: 543 break; 544 } 545 break; 546 case NVME_CTRL_DEAD: 547 switch (old_state) { 548 case NVME_CTRL_DELETING: 549 changed = true; 550 fallthrough; 551 default: 552 break; 553 } 554 break; 555 default: 556 break; 557 } 558 559 if (changed) { 560 ctrl->state = new_state; 561 wake_up_all(&ctrl->state_wq); 562 } 563 564 spin_unlock_irqrestore(&ctrl->lock, flags); 565 if (!changed) 566 return false; 567 568 if (ctrl->state == NVME_CTRL_LIVE) { 569 if (old_state == NVME_CTRL_CONNECTING) 570 nvme_stop_failfast_work(ctrl); 571 nvme_kick_requeue_lists(ctrl); 572 } else if (ctrl->state == NVME_CTRL_CONNECTING && 573 old_state == NVME_CTRL_RESETTING) { 574 nvme_start_failfast_work(ctrl); 575 } 576 return changed; 577 } 578 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); 579 580 /* 581 * Returns true for sink states that can't ever transition back to live. 582 */ 583 static bool nvme_state_terminal(struct nvme_ctrl *ctrl) 584 { 585 switch (ctrl->state) { 586 case NVME_CTRL_NEW: 587 case NVME_CTRL_LIVE: 588 case NVME_CTRL_RESETTING: 589 case NVME_CTRL_CONNECTING: 590 return false; 591 case NVME_CTRL_DELETING: 592 case NVME_CTRL_DELETING_NOIO: 593 case NVME_CTRL_DEAD: 594 return true; 595 default: 596 WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state); 597 return true; 598 } 599 } 600 601 /* 602 * Waits for the controller state to be resetting, or returns false if it is 603 * not possible to ever transition to that state. 604 */ 605 bool nvme_wait_reset(struct nvme_ctrl *ctrl) 606 { 607 wait_event(ctrl->state_wq, 608 nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || 609 nvme_state_terminal(ctrl)); 610 return ctrl->state == NVME_CTRL_RESETTING; 611 } 612 EXPORT_SYMBOL_GPL(nvme_wait_reset); 613 614 static void nvme_free_ns_head(struct kref *ref) 615 { 616 struct nvme_ns_head *head = 617 container_of(ref, struct nvme_ns_head, ref); 618 619 nvme_mpath_remove_disk(head); 620 ida_free(&head->subsys->ns_ida, head->instance); 621 cleanup_srcu_struct(&head->srcu); 622 nvme_put_subsystem(head->subsys); 623 kfree(head); 624 } 625 626 bool nvme_tryget_ns_head(struct nvme_ns_head *head) 627 { 628 return kref_get_unless_zero(&head->ref); 629 } 630 631 void nvme_put_ns_head(struct nvme_ns_head *head) 632 { 633 kref_put(&head->ref, nvme_free_ns_head); 634 } 635 636 static void nvme_free_ns(struct kref *kref) 637 { 638 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); 639 640 put_disk(ns->disk); 641 nvme_put_ns_head(ns->head); 642 nvme_put_ctrl(ns->ctrl); 643 kfree(ns); 644 } 645 646 static inline bool nvme_get_ns(struct nvme_ns *ns) 647 { 648 return kref_get_unless_zero(&ns->kref); 649 } 650 651 void nvme_put_ns(struct nvme_ns *ns) 652 { 653 kref_put(&ns->kref, nvme_free_ns); 654 } 655 EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU); 656 657 static inline void nvme_clear_nvme_request(struct request *req) 658 { 659 nvme_req(req)->status = 0; 660 nvme_req(req)->retries = 0; 661 nvme_req(req)->flags = 0; 662 req->rq_flags |= RQF_DONTPREP; 663 } 664 665 /* initialize a passthrough request */ 666 void nvme_init_request(struct request *req, struct nvme_command *cmd) 667 { 668 if (req->q->queuedata) 669 req->timeout = NVME_IO_TIMEOUT; 670 else /* no queuedata implies admin queue */ 671 req->timeout = NVME_ADMIN_TIMEOUT; 672 673 /* passthru commands should let the driver set the SGL flags */ 674 cmd->common.flags &= ~NVME_CMD_SGL_ALL; 675 676 req->cmd_flags |= REQ_FAILFAST_DRIVER; 677 if (req->mq_hctx->type == HCTX_TYPE_POLL) 678 req->cmd_flags |= REQ_POLLED; 679 nvme_clear_nvme_request(req); 680 req->rq_flags |= RQF_QUIET; 681 memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); 682 } 683 EXPORT_SYMBOL_GPL(nvme_init_request); 684 685 /* 686 * For something we're not in a state to send to the device the default action 687 * is to busy it and retry it after the controller state is recovered. However, 688 * if the controller is deleting or if anything is marked for failfast or 689 * nvme multipath it is immediately failed. 690 * 691 * Note: commands used to initialize the controller will be marked for failfast. 692 * Note: nvme cli/ioctl commands are marked for failfast. 693 */ 694 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, 695 struct request *rq) 696 { 697 if (ctrl->state != NVME_CTRL_DELETING_NOIO && 698 ctrl->state != NVME_CTRL_DELETING && 699 ctrl->state != NVME_CTRL_DEAD && 700 !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && 701 !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) 702 return BLK_STS_RESOURCE; 703 return nvme_host_path_error(rq); 704 } 705 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); 706 707 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, 708 bool queue_live) 709 { 710 struct nvme_request *req = nvme_req(rq); 711 712 /* 713 * currently we have a problem sending passthru commands 714 * on the admin_q if the controller is not LIVE because we can't 715 * make sure that they are going out after the admin connect, 716 * controller enable and/or other commands in the initialization 717 * sequence. until the controller will be LIVE, fail with 718 * BLK_STS_RESOURCE so that they will be rescheduled. 719 */ 720 if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) 721 return false; 722 723 if (ctrl->ops->flags & NVME_F_FABRICS) { 724 /* 725 * Only allow commands on a live queue, except for the connect 726 * command, which is require to set the queue live in the 727 * appropinquate states. 728 */ 729 switch (ctrl->state) { 730 case NVME_CTRL_CONNECTING: 731 if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && 732 (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || 733 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send || 734 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive)) 735 return true; 736 break; 737 default: 738 break; 739 case NVME_CTRL_DEAD: 740 return false; 741 } 742 } 743 744 return queue_live; 745 } 746 EXPORT_SYMBOL_GPL(__nvme_check_ready); 747 748 static inline void nvme_setup_flush(struct nvme_ns *ns, 749 struct nvme_command *cmnd) 750 { 751 memset(cmnd, 0, sizeof(*cmnd)); 752 cmnd->common.opcode = nvme_cmd_flush; 753 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); 754 } 755 756 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, 757 struct nvme_command *cmnd) 758 { 759 unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; 760 struct nvme_dsm_range *range; 761 struct bio *bio; 762 763 /* 764 * Some devices do not consider the DSM 'Number of Ranges' field when 765 * determining how much data to DMA. Always allocate memory for maximum 766 * number of segments to prevent device reading beyond end of buffer. 767 */ 768 static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; 769 770 range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN); 771 if (!range) { 772 /* 773 * If we fail allocation our range, fallback to the controller 774 * discard page. If that's also busy, it's safe to return 775 * busy, as we know we can make progress once that's freed. 776 */ 777 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) 778 return BLK_STS_RESOURCE; 779 780 range = page_address(ns->ctrl->discard_page); 781 } 782 783 __rq_for_each_bio(bio, req) { 784 u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); 785 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; 786 787 if (n < segments) { 788 range[n].cattr = cpu_to_le32(0); 789 range[n].nlb = cpu_to_le32(nlb); 790 range[n].slba = cpu_to_le64(slba); 791 } 792 n++; 793 } 794 795 if (WARN_ON_ONCE(n != segments)) { 796 if (virt_to_page(range) == ns->ctrl->discard_page) 797 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); 798 else 799 kfree(range); 800 return BLK_STS_IOERR; 801 } 802 803 memset(cmnd, 0, sizeof(*cmnd)); 804 cmnd->dsm.opcode = nvme_cmd_dsm; 805 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); 806 cmnd->dsm.nr = cpu_to_le32(segments - 1); 807 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 808 809 req->special_vec.bv_page = virt_to_page(range); 810 req->special_vec.bv_offset = offset_in_page(range); 811 req->special_vec.bv_len = alloc_size; 812 req->rq_flags |= RQF_SPECIAL_PAYLOAD; 813 814 return BLK_STS_OK; 815 } 816 817 static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, 818 struct request *req) 819 { 820 u32 upper, lower; 821 u64 ref48; 822 823 /* both rw and write zeroes share the same reftag format */ 824 switch (ns->guard_type) { 825 case NVME_NVM_NS_16B_GUARD: 826 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); 827 break; 828 case NVME_NVM_NS_64B_GUARD: 829 ref48 = ext_pi_ref_tag(req); 830 lower = lower_32_bits(ref48); 831 upper = upper_32_bits(ref48); 832 833 cmnd->rw.reftag = cpu_to_le32(lower); 834 cmnd->rw.cdw3 = cpu_to_le32(upper); 835 break; 836 default: 837 break; 838 } 839 } 840 841 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, 842 struct request *req, struct nvme_command *cmnd) 843 { 844 memset(cmnd, 0, sizeof(*cmnd)); 845 846 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 847 return nvme_setup_discard(ns, req, cmnd); 848 849 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; 850 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); 851 cmnd->write_zeroes.slba = 852 cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); 853 cmnd->write_zeroes.length = 854 cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); 855 856 if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC)) 857 cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC); 858 859 if (nvme_ns_has_pi(ns)) { 860 cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); 861 862 switch (ns->pi_type) { 863 case NVME_NS_DPS_PI_TYPE1: 864 case NVME_NS_DPS_PI_TYPE2: 865 nvme_set_ref_tag(ns, cmnd, req); 866 break; 867 } 868 } 869 870 return BLK_STS_OK; 871 } 872 873 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, 874 struct request *req, struct nvme_command *cmnd, 875 enum nvme_opcode op) 876 { 877 u16 control = 0; 878 u32 dsmgmt = 0; 879 880 if (req->cmd_flags & REQ_FUA) 881 control |= NVME_RW_FUA; 882 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) 883 control |= NVME_RW_LR; 884 885 if (req->cmd_flags & REQ_RAHEAD) 886 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 887 888 cmnd->rw.opcode = op; 889 cmnd->rw.flags = 0; 890 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); 891 cmnd->rw.cdw2 = 0; 892 cmnd->rw.cdw3 = 0; 893 cmnd->rw.metadata = 0; 894 cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); 895 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); 896 cmnd->rw.reftag = 0; 897 cmnd->rw.apptag = 0; 898 cmnd->rw.appmask = 0; 899 900 if (ns->ms) { 901 /* 902 * If formated with metadata, the block layer always provides a 903 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else 904 * we enable the PRACT bit for protection information or set the 905 * namespace capacity to zero to prevent any I/O. 906 */ 907 if (!blk_integrity_rq(req)) { 908 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) 909 return BLK_STS_NOTSUPP; 910 control |= NVME_RW_PRINFO_PRACT; 911 } 912 913 switch (ns->pi_type) { 914 case NVME_NS_DPS_PI_TYPE3: 915 control |= NVME_RW_PRINFO_PRCHK_GUARD; 916 break; 917 case NVME_NS_DPS_PI_TYPE1: 918 case NVME_NS_DPS_PI_TYPE2: 919 control |= NVME_RW_PRINFO_PRCHK_GUARD | 920 NVME_RW_PRINFO_PRCHK_REF; 921 if (op == nvme_cmd_zone_append) 922 control |= NVME_RW_APPEND_PIREMAP; 923 nvme_set_ref_tag(ns, cmnd, req); 924 break; 925 } 926 } 927 928 cmnd->rw.control = cpu_to_le16(control); 929 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 930 return 0; 931 } 932 933 void nvme_cleanup_cmd(struct request *req) 934 { 935 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { 936 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 937 938 if (req->special_vec.bv_page == ctrl->discard_page) 939 clear_bit_unlock(0, &ctrl->discard_page_busy); 940 else 941 kfree(bvec_virt(&req->special_vec)); 942 } 943 } 944 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); 945 946 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) 947 { 948 struct nvme_command *cmd = nvme_req(req)->cmd; 949 blk_status_t ret = BLK_STS_OK; 950 951 if (!(req->rq_flags & RQF_DONTPREP)) 952 nvme_clear_nvme_request(req); 953 954 switch (req_op(req)) { 955 case REQ_OP_DRV_IN: 956 case REQ_OP_DRV_OUT: 957 /* these are setup prior to execution in nvme_init_request() */ 958 break; 959 case REQ_OP_FLUSH: 960 nvme_setup_flush(ns, cmd); 961 break; 962 case REQ_OP_ZONE_RESET_ALL: 963 case REQ_OP_ZONE_RESET: 964 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET); 965 break; 966 case REQ_OP_ZONE_OPEN: 967 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN); 968 break; 969 case REQ_OP_ZONE_CLOSE: 970 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE); 971 break; 972 case REQ_OP_ZONE_FINISH: 973 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH); 974 break; 975 case REQ_OP_WRITE_ZEROES: 976 ret = nvme_setup_write_zeroes(ns, req, cmd); 977 break; 978 case REQ_OP_DISCARD: 979 ret = nvme_setup_discard(ns, req, cmd); 980 break; 981 case REQ_OP_READ: 982 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read); 983 break; 984 case REQ_OP_WRITE: 985 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write); 986 break; 987 case REQ_OP_ZONE_APPEND: 988 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append); 989 break; 990 default: 991 WARN_ON_ONCE(1); 992 return BLK_STS_IOERR; 993 } 994 995 cmd->common.command_id = nvme_cid(req); 996 trace_nvme_setup_cmd(req, cmd); 997 return ret; 998 } 999 EXPORT_SYMBOL_GPL(nvme_setup_cmd); 1000 1001 /* 1002 * Return values: 1003 * 0: success 1004 * >0: nvme controller's cqe status response 1005 * <0: kernel error in lieu of controller response 1006 */ 1007 static int nvme_execute_rq(struct request *rq, bool at_head) 1008 { 1009 blk_status_t status; 1010 1011 status = blk_execute_rq(rq, at_head); 1012 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) 1013 return -EINTR; 1014 if (nvme_req(rq)->status) 1015 return nvme_req(rq)->status; 1016 return blk_status_to_errno(status); 1017 } 1018 1019 /* 1020 * Returns 0 on success. If the result is negative, it's a Linux error code; 1021 * if the result is positive, it's an NVM Express status code 1022 */ 1023 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 1024 union nvme_result *result, void *buffer, unsigned bufflen, 1025 int qid, int at_head, blk_mq_req_flags_t flags) 1026 { 1027 struct request *req; 1028 int ret; 1029 1030 if (qid == NVME_QID_ANY) 1031 req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags); 1032 else 1033 req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags, 1034 qid - 1); 1035 1036 if (IS_ERR(req)) 1037 return PTR_ERR(req); 1038 nvme_init_request(req, cmd); 1039 1040 if (buffer && bufflen) { 1041 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); 1042 if (ret) 1043 goto out; 1044 } 1045 1046 ret = nvme_execute_rq(req, at_head); 1047 if (result && ret >= 0) 1048 *result = nvme_req(req)->result; 1049 out: 1050 blk_mq_free_request(req); 1051 return ret; 1052 } 1053 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd); 1054 1055 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 1056 void *buffer, unsigned bufflen) 1057 { 1058 return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 1059 NVME_QID_ANY, 0, 0); 1060 } 1061 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); 1062 1063 static u32 nvme_known_admin_effects(u8 opcode) 1064 { 1065 switch (opcode) { 1066 case nvme_admin_format_nvm: 1067 return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC | 1068 NVME_CMD_EFFECTS_CSE_MASK; 1069 case nvme_admin_sanitize_nvm: 1070 return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK; 1071 default: 1072 break; 1073 } 1074 return 0; 1075 } 1076 1077 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) 1078 { 1079 u32 effects = 0; 1080 1081 if (ns) { 1082 if (ns->head->effects) 1083 effects = le32_to_cpu(ns->head->effects->iocs[opcode]); 1084 if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) 1085 dev_warn_once(ctrl->device, 1086 "IO command:%02x has unhandled effects:%08x\n", 1087 opcode, effects); 1088 return 0; 1089 } 1090 1091 if (ctrl->effects) 1092 effects = le32_to_cpu(ctrl->effects->acs[opcode]); 1093 effects |= nvme_known_admin_effects(opcode); 1094 1095 return effects; 1096 } 1097 EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU); 1098 1099 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 1100 u8 opcode) 1101 { 1102 u32 effects = nvme_command_effects(ctrl, ns, opcode); 1103 1104 /* 1105 * For simplicity, IO to all namespaces is quiesced even if the command 1106 * effects say only one namespace is affected. 1107 */ 1108 if (effects & NVME_CMD_EFFECTS_CSE_MASK) { 1109 mutex_lock(&ctrl->scan_lock); 1110 mutex_lock(&ctrl->subsys->lock); 1111 nvme_mpath_start_freeze(ctrl->subsys); 1112 nvme_mpath_wait_freeze(ctrl->subsys); 1113 nvme_start_freeze(ctrl); 1114 nvme_wait_freeze(ctrl); 1115 } 1116 return effects; 1117 } 1118 1119 void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, 1120 struct nvme_command *cmd, int status) 1121 { 1122 if (effects & NVME_CMD_EFFECTS_CSE_MASK) { 1123 nvme_unfreeze(ctrl); 1124 nvme_mpath_unfreeze(ctrl->subsys); 1125 mutex_unlock(&ctrl->subsys->lock); 1126 mutex_unlock(&ctrl->scan_lock); 1127 } 1128 if (effects & NVME_CMD_EFFECTS_CCC) { 1129 dev_info(ctrl->device, 1130 "controller capabilities changed, reset may be required to take effect.\n"); 1131 } 1132 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) { 1133 nvme_queue_scan(ctrl); 1134 flush_work(&ctrl->scan_work); 1135 } 1136 1137 switch (cmd->common.opcode) { 1138 case nvme_admin_set_features: 1139 switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) { 1140 case NVME_FEAT_KATO: 1141 /* 1142 * Keep alive commands interval on the host should be 1143 * updated when KATO is modified by Set Features 1144 * commands. 1145 */ 1146 if (!status) 1147 nvme_update_keep_alive(ctrl, cmd); 1148 break; 1149 default: 1150 break; 1151 } 1152 break; 1153 default: 1154 break; 1155 } 1156 } 1157 EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); 1158 1159 int nvme_execute_passthru_rq(struct request *rq, u32 *effects) 1160 { 1161 struct nvme_command *cmd = nvme_req(rq)->cmd; 1162 struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl; 1163 struct nvme_ns *ns = rq->q->queuedata; 1164 1165 *effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode); 1166 return nvme_execute_rq(rq, false); 1167 } 1168 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU); 1169 1170 /* 1171 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: 1172 * 1173 * The host should send Keep Alive commands at half of the Keep Alive Timeout 1174 * accounting for transport roundtrip times [..]. 1175 */ 1176 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) 1177 { 1178 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2); 1179 } 1180 1181 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, 1182 blk_status_t status) 1183 { 1184 struct nvme_ctrl *ctrl = rq->end_io_data; 1185 unsigned long flags; 1186 bool startka = false; 1187 1188 blk_mq_free_request(rq); 1189 1190 if (status) { 1191 dev_err(ctrl->device, 1192 "failed nvme_keep_alive_end_io error=%d\n", 1193 status); 1194 return RQ_END_IO_NONE; 1195 } 1196 1197 ctrl->comp_seen = false; 1198 spin_lock_irqsave(&ctrl->lock, flags); 1199 if (ctrl->state == NVME_CTRL_LIVE || 1200 ctrl->state == NVME_CTRL_CONNECTING) 1201 startka = true; 1202 spin_unlock_irqrestore(&ctrl->lock, flags); 1203 if (startka) 1204 nvme_queue_keep_alive_work(ctrl); 1205 return RQ_END_IO_NONE; 1206 } 1207 1208 static void nvme_keep_alive_work(struct work_struct *work) 1209 { 1210 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 1211 struct nvme_ctrl, ka_work); 1212 bool comp_seen = ctrl->comp_seen; 1213 struct request *rq; 1214 1215 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { 1216 dev_dbg(ctrl->device, 1217 "reschedule traffic based keep-alive timer\n"); 1218 ctrl->comp_seen = false; 1219 nvme_queue_keep_alive_work(ctrl); 1220 return; 1221 } 1222 1223 rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd), 1224 BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); 1225 if (IS_ERR(rq)) { 1226 /* allocation failure, reset the controller */ 1227 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); 1228 nvme_reset_ctrl(ctrl); 1229 return; 1230 } 1231 nvme_init_request(rq, &ctrl->ka_cmd); 1232 1233 rq->timeout = ctrl->kato * HZ; 1234 rq->end_io = nvme_keep_alive_end_io; 1235 rq->end_io_data = ctrl; 1236 blk_execute_rq_nowait(rq, false); 1237 } 1238 1239 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) 1240 { 1241 if (unlikely(ctrl->kato == 0)) 1242 return; 1243 1244 nvme_queue_keep_alive_work(ctrl); 1245 } 1246 1247 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) 1248 { 1249 if (unlikely(ctrl->kato == 0)) 1250 return; 1251 1252 cancel_delayed_work_sync(&ctrl->ka_work); 1253 } 1254 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); 1255 1256 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, 1257 struct nvme_command *cmd) 1258 { 1259 unsigned int new_kato = 1260 DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000); 1261 1262 dev_info(ctrl->device, 1263 "keep alive interval updated from %u ms to %u ms\n", 1264 ctrl->kato * 1000 / 2, new_kato * 1000 / 2); 1265 1266 nvme_stop_keep_alive(ctrl); 1267 ctrl->kato = new_kato; 1268 nvme_start_keep_alive(ctrl); 1269 } 1270 1271 /* 1272 * In NVMe 1.0 the CNS field was just a binary controller or namespace 1273 * flag, thus sending any new CNS opcodes has a big chance of not working. 1274 * Qemu unfortunately had that bug after reporting a 1.1 version compliance 1275 * (but not for any later version). 1276 */ 1277 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) 1278 { 1279 if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) 1280 return ctrl->vs < NVME_VS(1, 2, 0); 1281 return ctrl->vs < NVME_VS(1, 1, 0); 1282 } 1283 1284 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) 1285 { 1286 struct nvme_command c = { }; 1287 int error; 1288 1289 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1290 c.identify.opcode = nvme_admin_identify; 1291 c.identify.cns = NVME_ID_CNS_CTRL; 1292 1293 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); 1294 if (!*id) 1295 return -ENOMEM; 1296 1297 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, 1298 sizeof(struct nvme_id_ctrl)); 1299 if (error) 1300 kfree(*id); 1301 return error; 1302 } 1303 1304 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, 1305 struct nvme_ns_id_desc *cur, bool *csi_seen) 1306 { 1307 const char *warn_str = "ctrl returned bogus length:"; 1308 void *data = cur; 1309 1310 switch (cur->nidt) { 1311 case NVME_NIDT_EUI64: 1312 if (cur->nidl != NVME_NIDT_EUI64_LEN) { 1313 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n", 1314 warn_str, cur->nidl); 1315 return -1; 1316 } 1317 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) 1318 return NVME_NIDT_EUI64_LEN; 1319 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); 1320 return NVME_NIDT_EUI64_LEN; 1321 case NVME_NIDT_NGUID: 1322 if (cur->nidl != NVME_NIDT_NGUID_LEN) { 1323 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n", 1324 warn_str, cur->nidl); 1325 return -1; 1326 } 1327 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) 1328 return NVME_NIDT_NGUID_LEN; 1329 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); 1330 return NVME_NIDT_NGUID_LEN; 1331 case NVME_NIDT_UUID: 1332 if (cur->nidl != NVME_NIDT_UUID_LEN) { 1333 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n", 1334 warn_str, cur->nidl); 1335 return -1; 1336 } 1337 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) 1338 return NVME_NIDT_UUID_LEN; 1339 uuid_copy(&ids->uuid, data + sizeof(*cur)); 1340 return NVME_NIDT_UUID_LEN; 1341 case NVME_NIDT_CSI: 1342 if (cur->nidl != NVME_NIDT_CSI_LEN) { 1343 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n", 1344 warn_str, cur->nidl); 1345 return -1; 1346 } 1347 memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN); 1348 *csi_seen = true; 1349 return NVME_NIDT_CSI_LEN; 1350 default: 1351 /* Skip unknown types */ 1352 return cur->nidl; 1353 } 1354 } 1355 1356 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, 1357 struct nvme_ns_info *info) 1358 { 1359 struct nvme_command c = { }; 1360 bool csi_seen = false; 1361 int status, pos, len; 1362 void *data; 1363 1364 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl)) 1365 return 0; 1366 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) 1367 return 0; 1368 1369 c.identify.opcode = nvme_admin_identify; 1370 c.identify.nsid = cpu_to_le32(info->nsid); 1371 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; 1372 1373 data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 1374 if (!data) 1375 return -ENOMEM; 1376 1377 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, 1378 NVME_IDENTIFY_DATA_SIZE); 1379 if (status) { 1380 dev_warn(ctrl->device, 1381 "Identify Descriptors failed (nsid=%u, status=0x%x)\n", 1382 info->nsid, status); 1383 goto free_data; 1384 } 1385 1386 for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { 1387 struct nvme_ns_id_desc *cur = data + pos; 1388 1389 if (cur->nidl == 0) 1390 break; 1391 1392 len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen); 1393 if (len < 0) 1394 break; 1395 1396 len += sizeof(*cur); 1397 } 1398 1399 if (nvme_multi_css(ctrl) && !csi_seen) { 1400 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n", 1401 info->nsid); 1402 status = -EINVAL; 1403 } 1404 1405 free_data: 1406 kfree(data); 1407 return status; 1408 } 1409 1410 static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, 1411 struct nvme_id_ns **id) 1412 { 1413 struct nvme_command c = { }; 1414 int error; 1415 1416 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1417 c.identify.opcode = nvme_admin_identify; 1418 c.identify.nsid = cpu_to_le32(nsid); 1419 c.identify.cns = NVME_ID_CNS_NS; 1420 1421 *id = kmalloc(sizeof(**id), GFP_KERNEL); 1422 if (!*id) 1423 return -ENOMEM; 1424 1425 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); 1426 if (error) { 1427 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); 1428 goto out_free_id; 1429 } 1430 1431 error = NVME_SC_INVALID_NS | NVME_SC_DNR; 1432 if ((*id)->ncap == 0) /* namespace not allocated or attached */ 1433 goto out_free_id; 1434 return 0; 1435 1436 out_free_id: 1437 kfree(*id); 1438 return error; 1439 } 1440 1441 static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, 1442 struct nvme_ns_info *info) 1443 { 1444 struct nvme_ns_ids *ids = &info->ids; 1445 struct nvme_id_ns *id; 1446 int ret; 1447 1448 ret = nvme_identify_ns(ctrl, info->nsid, &id); 1449 if (ret) 1450 return ret; 1451 info->anagrpid = id->anagrpid; 1452 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; 1453 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; 1454 info->is_ready = true; 1455 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { 1456 dev_info(ctrl->device, 1457 "Ignoring bogus Namespace Identifiers\n"); 1458 } else { 1459 if (ctrl->vs >= NVME_VS(1, 1, 0) && 1460 !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 1461 memcpy(ids->eui64, id->eui64, sizeof(ids->eui64)); 1462 if (ctrl->vs >= NVME_VS(1, 2, 0) && 1463 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 1464 memcpy(ids->nguid, id->nguid, sizeof(ids->nguid)); 1465 } 1466 kfree(id); 1467 return 0; 1468 } 1469 1470 static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, 1471 struct nvme_ns_info *info) 1472 { 1473 struct nvme_id_ns_cs_indep *id; 1474 struct nvme_command c = { 1475 .identify.opcode = nvme_admin_identify, 1476 .identify.nsid = cpu_to_le32(info->nsid), 1477 .identify.cns = NVME_ID_CNS_NS_CS_INDEP, 1478 }; 1479 int ret; 1480 1481 id = kmalloc(sizeof(*id), GFP_KERNEL); 1482 if (!id) 1483 return -ENOMEM; 1484 1485 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 1486 if (!ret) { 1487 info->anagrpid = id->anagrpid; 1488 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; 1489 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; 1490 info->is_ready = id->nstat & NVME_NSTAT_NRDY; 1491 } 1492 kfree(id); 1493 return ret; 1494 } 1495 1496 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, 1497 unsigned int dword11, void *buffer, size_t buflen, u32 *result) 1498 { 1499 union nvme_result res = { 0 }; 1500 struct nvme_command c = { }; 1501 int ret; 1502 1503 c.features.opcode = op; 1504 c.features.fid = cpu_to_le32(fid); 1505 c.features.dword11 = cpu_to_le32(dword11); 1506 1507 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, 1508 buffer, buflen, NVME_QID_ANY, 0, 0); 1509 if (ret >= 0 && result) 1510 *result = le32_to_cpu(res.u32); 1511 return ret; 1512 } 1513 1514 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, 1515 unsigned int dword11, void *buffer, size_t buflen, 1516 u32 *result) 1517 { 1518 return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, 1519 buflen, result); 1520 } 1521 EXPORT_SYMBOL_GPL(nvme_set_features); 1522 1523 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, 1524 unsigned int dword11, void *buffer, size_t buflen, 1525 u32 *result) 1526 { 1527 return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, 1528 buflen, result); 1529 } 1530 EXPORT_SYMBOL_GPL(nvme_get_features); 1531 1532 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) 1533 { 1534 u32 q_count = (*count - 1) | ((*count - 1) << 16); 1535 u32 result; 1536 int status, nr_io_queues; 1537 1538 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, 1539 &result); 1540 if (status < 0) 1541 return status; 1542 1543 /* 1544 * Degraded controllers might return an error when setting the queue 1545 * count. We still want to be able to bring them online and offer 1546 * access to the admin queue, as that might be only way to fix them up. 1547 */ 1548 if (status > 0) { 1549 dev_err(ctrl->device, "Could not set queue count (%d)\n", status); 1550 *count = 0; 1551 } else { 1552 nr_io_queues = min(result & 0xffff, result >> 16) + 1; 1553 *count = min(*count, nr_io_queues); 1554 } 1555 1556 return 0; 1557 } 1558 EXPORT_SYMBOL_GPL(nvme_set_queue_count); 1559 1560 #define NVME_AEN_SUPPORTED \ 1561 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \ 1562 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE) 1563 1564 static void nvme_enable_aen(struct nvme_ctrl *ctrl) 1565 { 1566 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; 1567 int status; 1568 1569 if (!supported_aens) 1570 return; 1571 1572 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, 1573 NULL, 0, &result); 1574 if (status) 1575 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", 1576 supported_aens); 1577 1578 queue_work(nvme_wq, &ctrl->async_event_work); 1579 } 1580 1581 static int nvme_ns_open(struct nvme_ns *ns) 1582 { 1583 1584 /* should never be called due to GENHD_FL_HIDDEN */ 1585 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head))) 1586 goto fail; 1587 if (!nvme_get_ns(ns)) 1588 goto fail; 1589 if (!try_module_get(ns->ctrl->ops->module)) 1590 goto fail_put_ns; 1591 1592 return 0; 1593 1594 fail_put_ns: 1595 nvme_put_ns(ns); 1596 fail: 1597 return -ENXIO; 1598 } 1599 1600 static void nvme_ns_release(struct nvme_ns *ns) 1601 { 1602 1603 module_put(ns->ctrl->ops->module); 1604 nvme_put_ns(ns); 1605 } 1606 1607 static int nvme_open(struct block_device *bdev, fmode_t mode) 1608 { 1609 return nvme_ns_open(bdev->bd_disk->private_data); 1610 } 1611 1612 static void nvme_release(struct gendisk *disk, fmode_t mode) 1613 { 1614 nvme_ns_release(disk->private_data); 1615 } 1616 1617 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) 1618 { 1619 /* some standard values */ 1620 geo->heads = 1 << 6; 1621 geo->sectors = 1 << 5; 1622 geo->cylinders = get_capacity(bdev->bd_disk) >> 11; 1623 return 0; 1624 } 1625 1626 #ifdef CONFIG_BLK_DEV_INTEGRITY 1627 static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, 1628 u32 max_integrity_segments) 1629 { 1630 struct blk_integrity integrity = { }; 1631 1632 switch (ns->pi_type) { 1633 case NVME_NS_DPS_PI_TYPE3: 1634 switch (ns->guard_type) { 1635 case NVME_NVM_NS_16B_GUARD: 1636 integrity.profile = &t10_pi_type3_crc; 1637 integrity.tag_size = sizeof(u16) + sizeof(u32); 1638 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1639 break; 1640 case NVME_NVM_NS_64B_GUARD: 1641 integrity.profile = &ext_pi_type3_crc64; 1642 integrity.tag_size = sizeof(u16) + 6; 1643 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1644 break; 1645 default: 1646 integrity.profile = NULL; 1647 break; 1648 } 1649 break; 1650 case NVME_NS_DPS_PI_TYPE1: 1651 case NVME_NS_DPS_PI_TYPE2: 1652 switch (ns->guard_type) { 1653 case NVME_NVM_NS_16B_GUARD: 1654 integrity.profile = &t10_pi_type1_crc; 1655 integrity.tag_size = sizeof(u16); 1656 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1657 break; 1658 case NVME_NVM_NS_64B_GUARD: 1659 integrity.profile = &ext_pi_type1_crc64; 1660 integrity.tag_size = sizeof(u16); 1661 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1662 break; 1663 default: 1664 integrity.profile = NULL; 1665 break; 1666 } 1667 break; 1668 default: 1669 integrity.profile = NULL; 1670 break; 1671 } 1672 1673 integrity.tuple_size = ns->ms; 1674 blk_integrity_register(disk, &integrity); 1675 blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); 1676 } 1677 #else 1678 static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, 1679 u32 max_integrity_segments) 1680 { 1681 } 1682 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 1683 1684 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) 1685 { 1686 struct nvme_ctrl *ctrl = ns->ctrl; 1687 struct request_queue *queue = disk->queue; 1688 u32 size = queue_logical_block_size(queue); 1689 1690 if (ctrl->max_discard_sectors == 0) { 1691 blk_queue_max_discard_sectors(queue, 0); 1692 return; 1693 } 1694 1695 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < 1696 NVME_DSM_MAX_RANGES); 1697 1698 queue->limits.discard_granularity = size; 1699 1700 /* If discard is already enabled, don't reset queue limits */ 1701 if (queue->limits.max_discard_sectors) 1702 return; 1703 1704 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX)) 1705 ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl); 1706 1707 blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors); 1708 blk_queue_max_discard_segments(queue, ctrl->max_discard_segments); 1709 1710 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 1711 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); 1712 } 1713 1714 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) 1715 { 1716 return uuid_equal(&a->uuid, &b->uuid) && 1717 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && 1718 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 && 1719 a->csi == b->csi; 1720 } 1721 1722 static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) 1723 { 1724 bool first = id->dps & NVME_NS_DPS_PI_FIRST; 1725 unsigned lbaf = nvme_lbaf_index(id->flbas); 1726 struct nvme_ctrl *ctrl = ns->ctrl; 1727 struct nvme_command c = { }; 1728 struct nvme_id_ns_nvm *nvm; 1729 int ret = 0; 1730 u32 elbaf; 1731 1732 ns->pi_size = 0; 1733 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); 1734 if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { 1735 ns->pi_size = sizeof(struct t10_pi_tuple); 1736 ns->guard_type = NVME_NVM_NS_16B_GUARD; 1737 goto set_pi; 1738 } 1739 1740 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); 1741 if (!nvm) 1742 return -ENOMEM; 1743 1744 c.identify.opcode = nvme_admin_identify; 1745 c.identify.nsid = cpu_to_le32(ns->head->ns_id); 1746 c.identify.cns = NVME_ID_CNS_CS_NS; 1747 c.identify.csi = NVME_CSI_NVM; 1748 1749 ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm)); 1750 if (ret) 1751 goto free_data; 1752 1753 elbaf = le32_to_cpu(nvm->elbaf[lbaf]); 1754 1755 /* no support for storage tag formats right now */ 1756 if (nvme_elbaf_sts(elbaf)) 1757 goto free_data; 1758 1759 ns->guard_type = nvme_elbaf_guard_type(elbaf); 1760 switch (ns->guard_type) { 1761 case NVME_NVM_NS_64B_GUARD: 1762 ns->pi_size = sizeof(struct crc64_pi_tuple); 1763 break; 1764 case NVME_NVM_NS_16B_GUARD: 1765 ns->pi_size = sizeof(struct t10_pi_tuple); 1766 break; 1767 default: 1768 break; 1769 } 1770 1771 free_data: 1772 kfree(nvm); 1773 set_pi: 1774 if (ns->pi_size && (first || ns->ms == ns->pi_size)) 1775 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1776 else 1777 ns->pi_type = 0; 1778 1779 return ret; 1780 } 1781 1782 static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) 1783 { 1784 struct nvme_ctrl *ctrl = ns->ctrl; 1785 1786 if (nvme_init_ms(ns, id)) 1787 return; 1788 1789 ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1790 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1791 return; 1792 1793 if (ctrl->ops->flags & NVME_F_FABRICS) { 1794 /* 1795 * The NVMe over Fabrics specification only supports metadata as 1796 * part of the extended data LBA. We rely on HCA/HBA support to 1797 * remap the separate metadata buffer from the block layer. 1798 */ 1799 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) 1800 return; 1801 1802 ns->features |= NVME_NS_EXT_LBAS; 1803 1804 /* 1805 * The current fabrics transport drivers support namespace 1806 * metadata formats only if nvme_ns_has_pi() returns true. 1807 * Suppress support for all other formats so the namespace will 1808 * have a 0 capacity and not be usable through the block stack. 1809 * 1810 * Note, this check will need to be modified if any drivers 1811 * gain the ability to use other metadata formats. 1812 */ 1813 if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns)) 1814 ns->features |= NVME_NS_METADATA_SUPPORTED; 1815 } else { 1816 /* 1817 * For PCIe controllers, we can't easily remap the separate 1818 * metadata buffer from the block layer and thus require a 1819 * separate metadata buffer for block layer metadata/PI support. 1820 * We allow extended LBAs for the passthrough interface, though. 1821 */ 1822 if (id->flbas & NVME_NS_FLBAS_META_EXT) 1823 ns->features |= NVME_NS_EXT_LBAS; 1824 else 1825 ns->features |= NVME_NS_METADATA_SUPPORTED; 1826 } 1827 } 1828 1829 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, 1830 struct request_queue *q) 1831 { 1832 bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; 1833 1834 if (ctrl->max_hw_sectors) { 1835 u32 max_segments = 1836 (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; 1837 1838 max_segments = min_not_zero(max_segments, ctrl->max_segments); 1839 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 1840 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 1841 } 1842 blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); 1843 blk_queue_dma_alignment(q, 3); 1844 blk_queue_write_cache(q, vwc, vwc); 1845 } 1846 1847 static void nvme_update_disk_info(struct gendisk *disk, 1848 struct nvme_ns *ns, struct nvme_id_ns *id) 1849 { 1850 sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); 1851 unsigned short bs = 1 << ns->lba_shift; 1852 u32 atomic_bs, phys_bs, io_opt = 0; 1853 1854 /* 1855 * The block layer can't support LBA sizes larger than the page size 1856 * yet, so catch this early and don't allow block I/O. 1857 */ 1858 if (ns->lba_shift > PAGE_SHIFT) { 1859 capacity = 0; 1860 bs = (1 << 9); 1861 } 1862 1863 blk_integrity_unregister(disk); 1864 1865 atomic_bs = phys_bs = bs; 1866 if (id->nabo == 0) { 1867 /* 1868 * Bit 1 indicates whether NAWUPF is defined for this namespace 1869 * and whether it should be used instead of AWUPF. If NAWUPF == 1870 * 0 then AWUPF must be used instead. 1871 */ 1872 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) 1873 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; 1874 else 1875 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; 1876 } 1877 1878 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { 1879 /* NPWG = Namespace Preferred Write Granularity */ 1880 phys_bs = bs * (1 + le16_to_cpu(id->npwg)); 1881 /* NOWS = Namespace Optimal Write Size */ 1882 io_opt = bs * (1 + le16_to_cpu(id->nows)); 1883 } 1884 1885 blk_queue_logical_block_size(disk->queue, bs); 1886 /* 1887 * Linux filesystems assume writing a single physical block is 1888 * an atomic operation. Hence limit the physical block size to the 1889 * value of the Atomic Write Unit Power Fail parameter. 1890 */ 1891 blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); 1892 blk_queue_io_min(disk->queue, phys_bs); 1893 blk_queue_io_opt(disk->queue, io_opt); 1894 1895 /* 1896 * Register a metadata profile for PI, or the plain non-integrity NVMe 1897 * metadata masquerading as Type 0 if supported, otherwise reject block 1898 * I/O to namespaces with metadata except when the namespace supports 1899 * PI, as it can strip/insert in that case. 1900 */ 1901 if (ns->ms) { 1902 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && 1903 (ns->features & NVME_NS_METADATA_SUPPORTED)) 1904 nvme_init_integrity(disk, ns, 1905 ns->ctrl->max_integrity_segments); 1906 else if (!nvme_ns_has_pi(ns)) 1907 capacity = 0; 1908 } 1909 1910 set_capacity_and_notify(disk, capacity); 1911 1912 nvme_config_discard(disk, ns); 1913 blk_queue_max_write_zeroes_sectors(disk->queue, 1914 ns->ctrl->max_zeroes_sectors); 1915 } 1916 1917 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) 1918 { 1919 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags); 1920 } 1921 1922 static inline bool nvme_first_scan(struct gendisk *disk) 1923 { 1924 /* nvme_alloc_ns() scans the disk prior to adding it */ 1925 return !disk_live(disk); 1926 } 1927 1928 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) 1929 { 1930 struct nvme_ctrl *ctrl = ns->ctrl; 1931 u32 iob; 1932 1933 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && 1934 is_power_of_2(ctrl->max_hw_sectors)) 1935 iob = ctrl->max_hw_sectors; 1936 else 1937 iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); 1938 1939 if (!iob) 1940 return; 1941 1942 if (!is_power_of_2(iob)) { 1943 if (nvme_first_scan(ns->disk)) 1944 pr_warn("%s: ignoring unaligned IO boundary:%u\n", 1945 ns->disk->disk_name, iob); 1946 return; 1947 } 1948 1949 if (blk_queue_is_zoned(ns->disk->queue)) { 1950 if (nvme_first_scan(ns->disk)) 1951 pr_warn("%s: ignoring zoned namespace IO boundary\n", 1952 ns->disk->disk_name); 1953 return; 1954 } 1955 1956 blk_queue_chunk_sectors(ns->queue, iob); 1957 } 1958 1959 static int nvme_update_ns_info_generic(struct nvme_ns *ns, 1960 struct nvme_ns_info *info) 1961 { 1962 blk_mq_freeze_queue(ns->disk->queue); 1963 nvme_set_queue_limits(ns->ctrl, ns->queue); 1964 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 1965 blk_mq_unfreeze_queue(ns->disk->queue); 1966 1967 if (nvme_ns_head_multipath(ns->head)) { 1968 blk_mq_freeze_queue(ns->head->disk->queue); 1969 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 1970 nvme_mpath_revalidate_paths(ns); 1971 blk_stack_limits(&ns->head->disk->queue->limits, 1972 &ns->queue->limits, 0); 1973 ns->head->disk->flags |= GENHD_FL_HIDDEN; 1974 blk_mq_unfreeze_queue(ns->head->disk->queue); 1975 } 1976 1977 /* Hide the block-interface for these devices */ 1978 ns->disk->flags |= GENHD_FL_HIDDEN; 1979 set_bit(NVME_NS_READY, &ns->flags); 1980 1981 return 0; 1982 } 1983 1984 static int nvme_update_ns_info_block(struct nvme_ns *ns, 1985 struct nvme_ns_info *info) 1986 { 1987 struct nvme_id_ns *id; 1988 unsigned lbaf; 1989 int ret; 1990 1991 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id); 1992 if (ret) 1993 return ret; 1994 1995 blk_mq_freeze_queue(ns->disk->queue); 1996 lbaf = nvme_lbaf_index(id->flbas); 1997 ns->lba_shift = id->lbaf[lbaf].ds; 1998 nvme_set_queue_limits(ns->ctrl, ns->queue); 1999 2000 nvme_configure_metadata(ns, id); 2001 nvme_set_chunk_sectors(ns, id); 2002 nvme_update_disk_info(ns->disk, ns, id); 2003 2004 if (ns->head->ids.csi == NVME_CSI_ZNS) { 2005 ret = nvme_update_zone_info(ns, lbaf); 2006 if (ret) { 2007 blk_mq_unfreeze_queue(ns->disk->queue); 2008 goto out; 2009 } 2010 } 2011 2012 /* 2013 * Only set the DEAC bit if the device guarantees that reads from 2014 * deallocated data return zeroes. While the DEAC bit does not 2015 * require that, it must be a no-op if reads from deallocated data 2016 * do not return zeroes. 2017 */ 2018 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) 2019 ns->features |= NVME_NS_DEAC; 2020 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 2021 set_bit(NVME_NS_READY, &ns->flags); 2022 blk_mq_unfreeze_queue(ns->disk->queue); 2023 2024 if (blk_queue_is_zoned(ns->queue)) { 2025 ret = nvme_revalidate_zones(ns); 2026 if (ret && !nvme_first_scan(ns->disk)) 2027 goto out; 2028 } 2029 2030 if (nvme_ns_head_multipath(ns->head)) { 2031 blk_mq_freeze_queue(ns->head->disk->queue); 2032 nvme_update_disk_info(ns->head->disk, ns, id); 2033 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 2034 nvme_mpath_revalidate_paths(ns); 2035 blk_stack_limits(&ns->head->disk->queue->limits, 2036 &ns->queue->limits, 0); 2037 disk_update_readahead(ns->head->disk); 2038 blk_mq_unfreeze_queue(ns->head->disk->queue); 2039 } 2040 2041 ret = 0; 2042 out: 2043 /* 2044 * If probing fails due an unsupported feature, hide the block device, 2045 * but still allow other access. 2046 */ 2047 if (ret == -ENODEV) { 2048 ns->disk->flags |= GENHD_FL_HIDDEN; 2049 set_bit(NVME_NS_READY, &ns->flags); 2050 ret = 0; 2051 } 2052 kfree(id); 2053 return ret; 2054 } 2055 2056 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) 2057 { 2058 switch (info->ids.csi) { 2059 case NVME_CSI_ZNS: 2060 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { 2061 dev_info(ns->ctrl->device, 2062 "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", 2063 info->nsid); 2064 return nvme_update_ns_info_generic(ns, info); 2065 } 2066 return nvme_update_ns_info_block(ns, info); 2067 case NVME_CSI_NVM: 2068 return nvme_update_ns_info_block(ns, info); 2069 default: 2070 dev_info(ns->ctrl->device, 2071 "block device for nsid %u not supported (csi %u)\n", 2072 info->nsid, info->ids.csi); 2073 return nvme_update_ns_info_generic(ns, info); 2074 } 2075 } 2076 2077 static char nvme_pr_type(enum pr_type type) 2078 { 2079 switch (type) { 2080 case PR_WRITE_EXCLUSIVE: 2081 return 1; 2082 case PR_EXCLUSIVE_ACCESS: 2083 return 2; 2084 case PR_WRITE_EXCLUSIVE_REG_ONLY: 2085 return 3; 2086 case PR_EXCLUSIVE_ACCESS_REG_ONLY: 2087 return 4; 2088 case PR_WRITE_EXCLUSIVE_ALL_REGS: 2089 return 5; 2090 case PR_EXCLUSIVE_ACCESS_ALL_REGS: 2091 return 6; 2092 default: 2093 return 0; 2094 } 2095 } 2096 2097 static int nvme_send_ns_head_pr_command(struct block_device *bdev, 2098 struct nvme_command *c, u8 data[16]) 2099 { 2100 struct nvme_ns_head *head = bdev->bd_disk->private_data; 2101 int srcu_idx = srcu_read_lock(&head->srcu); 2102 struct nvme_ns *ns = nvme_find_path(head); 2103 int ret = -EWOULDBLOCK; 2104 2105 if (ns) { 2106 c->common.nsid = cpu_to_le32(ns->head->ns_id); 2107 ret = nvme_submit_sync_cmd(ns->queue, c, data, 16); 2108 } 2109 srcu_read_unlock(&head->srcu, srcu_idx); 2110 return ret; 2111 } 2112 2113 static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, 2114 u8 data[16]) 2115 { 2116 c->common.nsid = cpu_to_le32(ns->head->ns_id); 2117 return nvme_submit_sync_cmd(ns->queue, c, data, 16); 2118 } 2119 2120 static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 2121 u64 key, u64 sa_key, u8 op) 2122 { 2123 struct nvme_command c = { }; 2124 u8 data[16] = { 0, }; 2125 2126 put_unaligned_le64(key, &data[0]); 2127 put_unaligned_le64(sa_key, &data[8]); 2128 2129 c.common.opcode = op; 2130 c.common.cdw10 = cpu_to_le32(cdw10); 2131 2132 if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && 2133 bdev->bd_disk->fops == &nvme_ns_head_ops) 2134 return nvme_send_ns_head_pr_command(bdev, &c, data); 2135 return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data); 2136 } 2137 2138 static int nvme_pr_register(struct block_device *bdev, u64 old, 2139 u64 new, unsigned flags) 2140 { 2141 u32 cdw10; 2142 2143 if (flags & ~PR_FL_IGNORE_KEY) 2144 return -EOPNOTSUPP; 2145 2146 cdw10 = old ? 2 : 0; 2147 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; 2148 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 2149 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 2150 } 2151 2152 static int nvme_pr_reserve(struct block_device *bdev, u64 key, 2153 enum pr_type type, unsigned flags) 2154 { 2155 u32 cdw10; 2156 2157 if (flags & ~PR_FL_IGNORE_KEY) 2158 return -EOPNOTSUPP; 2159 2160 cdw10 = nvme_pr_type(type) << 8; 2161 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); 2162 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); 2163 } 2164 2165 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, 2166 enum pr_type type, bool abort) 2167 { 2168 u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); 2169 2170 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); 2171 } 2172 2173 static int nvme_pr_clear(struct block_device *bdev, u64 key) 2174 { 2175 u32 cdw10 = 1 | (key ? 0 : 1 << 3); 2176 2177 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2178 } 2179 2180 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 2181 { 2182 u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3); 2183 2184 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2185 } 2186 2187 const struct pr_ops nvme_pr_ops = { 2188 .pr_register = nvme_pr_register, 2189 .pr_reserve = nvme_pr_reserve, 2190 .pr_release = nvme_pr_release, 2191 .pr_preempt = nvme_pr_preempt, 2192 .pr_clear = nvme_pr_clear, 2193 }; 2194 2195 #ifdef CONFIG_BLK_SED_OPAL 2196 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2197 bool send) 2198 { 2199 struct nvme_ctrl *ctrl = data; 2200 struct nvme_command cmd = { }; 2201 2202 if (send) 2203 cmd.common.opcode = nvme_admin_security_send; 2204 else 2205 cmd.common.opcode = nvme_admin_security_recv; 2206 cmd.common.nsid = 0; 2207 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2208 cmd.common.cdw11 = cpu_to_le32(len); 2209 2210 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 2211 NVME_QID_ANY, 1, 0); 2212 } 2213 2214 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2215 { 2216 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) { 2217 if (!ctrl->opal_dev) 2218 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit); 2219 else if (was_suspended) 2220 opal_unlock_from_suspend(ctrl->opal_dev); 2221 } else { 2222 free_opal_dev(ctrl->opal_dev); 2223 ctrl->opal_dev = NULL; 2224 } 2225 } 2226 #else 2227 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2228 { 2229 } 2230 #endif /* CONFIG_BLK_SED_OPAL */ 2231 2232 #ifdef CONFIG_BLK_DEV_ZONED 2233 static int nvme_report_zones(struct gendisk *disk, sector_t sector, 2234 unsigned int nr_zones, report_zones_cb cb, void *data) 2235 { 2236 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, 2237 data); 2238 } 2239 #else 2240 #define nvme_report_zones NULL 2241 #endif /* CONFIG_BLK_DEV_ZONED */ 2242 2243 static const struct block_device_operations nvme_bdev_ops = { 2244 .owner = THIS_MODULE, 2245 .ioctl = nvme_ioctl, 2246 .compat_ioctl = blkdev_compat_ptr_ioctl, 2247 .open = nvme_open, 2248 .release = nvme_release, 2249 .getgeo = nvme_getgeo, 2250 .report_zones = nvme_report_zones, 2251 .pr_ops = &nvme_pr_ops, 2252 }; 2253 2254 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val, 2255 u32 timeout, const char *op) 2256 { 2257 unsigned long timeout_jiffies = jiffies + timeout * HZ; 2258 u32 csts; 2259 int ret; 2260 2261 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2262 if (csts == ~0) 2263 return -ENODEV; 2264 if ((csts & mask) == val) 2265 break; 2266 2267 usleep_range(1000, 2000); 2268 if (fatal_signal_pending(current)) 2269 return -EINTR; 2270 if (time_after(jiffies, timeout_jiffies)) { 2271 dev_err(ctrl->device, 2272 "Device not ready; aborting %s, CSTS=0x%x\n", 2273 op, csts); 2274 return -ENODEV; 2275 } 2276 } 2277 2278 return ret; 2279 } 2280 2281 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown) 2282 { 2283 int ret; 2284 2285 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2286 if (shutdown) 2287 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2288 else 2289 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2290 2291 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2292 if (ret) 2293 return ret; 2294 2295 if (shutdown) { 2296 return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK, 2297 NVME_CSTS_SHST_CMPLT, 2298 ctrl->shutdown_timeout, "shutdown"); 2299 } 2300 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2301 msleep(NVME_QUIRK_DELAY_AMOUNT); 2302 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0, 2303 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset"); 2304 } 2305 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2306 2307 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2308 { 2309 unsigned dev_page_min; 2310 u32 timeout; 2311 int ret; 2312 2313 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2314 if (ret) { 2315 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2316 return ret; 2317 } 2318 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2319 2320 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { 2321 dev_err(ctrl->device, 2322 "Minimum device page size %u too large for host (%u)\n", 2323 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); 2324 return -ENODEV; 2325 } 2326 2327 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) 2328 ctrl->ctrl_config = NVME_CC_CSS_CSI; 2329 else 2330 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2331 2332 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { 2333 u32 crto; 2334 2335 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); 2336 if (ret) { 2337 dev_err(ctrl->device, "Reading CRTO failed (%d)\n", 2338 ret); 2339 return ret; 2340 } 2341 2342 if (ctrl->cap & NVME_CAP_CRMS_CRIMS) { 2343 ctrl->ctrl_config |= NVME_CC_CRIME; 2344 timeout = NVME_CRTO_CRIMT(crto); 2345 } else { 2346 timeout = NVME_CRTO_CRWMT(crto); 2347 } 2348 } else { 2349 timeout = NVME_CAP_TIMEOUT(ctrl->cap); 2350 } 2351 2352 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 2353 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2354 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2355 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2356 if (ret) 2357 return ret; 2358 2359 /* Flush write to device (required if transport is PCI) */ 2360 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); 2361 if (ret) 2362 return ret; 2363 2364 ctrl->ctrl_config |= NVME_CC_ENABLE; 2365 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2366 if (ret) 2367 return ret; 2368 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY, 2369 (timeout + 1) / 2, "initialisation"); 2370 } 2371 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2372 2373 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2374 { 2375 __le64 ts; 2376 int ret; 2377 2378 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2379 return 0; 2380 2381 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2382 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2383 NULL); 2384 if (ret) 2385 dev_warn_once(ctrl->device, 2386 "could not set timestamp (%d)\n", ret); 2387 return ret; 2388 } 2389 2390 static int nvme_configure_host_options(struct nvme_ctrl *ctrl) 2391 { 2392 struct nvme_feat_host_behavior *host; 2393 u8 acre = 0, lbafee = 0; 2394 int ret; 2395 2396 /* Don't bother enabling the feature if retry delay is not reported */ 2397 if (ctrl->crdt[0]) 2398 acre = NVME_ENABLE_ACRE; 2399 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) 2400 lbafee = NVME_ENABLE_LBAFEE; 2401 2402 if (!acre && !lbafee) 2403 return 0; 2404 2405 host = kzalloc(sizeof(*host), GFP_KERNEL); 2406 if (!host) 2407 return 0; 2408 2409 host->acre = acre; 2410 host->lbafee = lbafee; 2411 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2412 host, sizeof(*host), NULL); 2413 kfree(host); 2414 return ret; 2415 } 2416 2417 /* 2418 * The function checks whether the given total (exlat + enlat) latency of 2419 * a power state allows the latter to be used as an APST transition target. 2420 * It does so by comparing the latency to the primary and secondary latency 2421 * tolerances defined by module params. If there's a match, the corresponding 2422 * timeout value is returned and the matching tolerance index (1 or 2) is 2423 * reported. 2424 */ 2425 static bool nvme_apst_get_transition_time(u64 total_latency, 2426 u64 *transition_time, unsigned *last_index) 2427 { 2428 if (total_latency <= apst_primary_latency_tol_us) { 2429 if (*last_index == 1) 2430 return false; 2431 *last_index = 1; 2432 *transition_time = apst_primary_timeout_ms; 2433 return true; 2434 } 2435 if (apst_secondary_timeout_ms && 2436 total_latency <= apst_secondary_latency_tol_us) { 2437 if (*last_index <= 2) 2438 return false; 2439 *last_index = 2; 2440 *transition_time = apst_secondary_timeout_ms; 2441 return true; 2442 } 2443 return false; 2444 } 2445 2446 /* 2447 * APST (Autonomous Power State Transition) lets us program a table of power 2448 * state transitions that the controller will perform automatically. 2449 * 2450 * Depending on module params, one of the two supported techniques will be used: 2451 * 2452 * - If the parameters provide explicit timeouts and tolerances, they will be 2453 * used to build a table with up to 2 non-operational states to transition to. 2454 * The default parameter values were selected based on the values used by 2455 * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic 2456 * regeneration of the APST table in the event of switching between external 2457 * and battery power, the timeouts and tolerances reflect a compromise 2458 * between values used by Microsoft for AC and battery scenarios. 2459 * - If not, we'll configure the table with a simple heuristic: we are willing 2460 * to spend at most 2% of the time transitioning between power states. 2461 * Therefore, when running in any given state, we will enter the next 2462 * lower-power non-operational state after waiting 50 * (enlat + exlat) 2463 * microseconds, as long as that state's exit latency is under the requested 2464 * maximum latency. 2465 * 2466 * We will not autonomously enter any non-operational state for which the total 2467 * latency exceeds ps_max_latency_us. 2468 * 2469 * Users can set ps_max_latency_us to zero to turn off APST. 2470 */ 2471 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2472 { 2473 struct nvme_feat_auto_pst *table; 2474 unsigned apste = 0; 2475 u64 max_lat_us = 0; 2476 __le64 target = 0; 2477 int max_ps = -1; 2478 int state; 2479 int ret; 2480 unsigned last_lt_index = UINT_MAX; 2481 2482 /* 2483 * If APST isn't supported or if we haven't been initialized yet, 2484 * then don't do anything. 2485 */ 2486 if (!ctrl->apsta) 2487 return 0; 2488 2489 if (ctrl->npss > 31) { 2490 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2491 return 0; 2492 } 2493 2494 table = kzalloc(sizeof(*table), GFP_KERNEL); 2495 if (!table) 2496 return 0; 2497 2498 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2499 /* Turn off APST. */ 2500 dev_dbg(ctrl->device, "APST disabled\n"); 2501 goto done; 2502 } 2503 2504 /* 2505 * Walk through all states from lowest- to highest-power. 2506 * According to the spec, lower-numbered states use more power. NPSS, 2507 * despite the name, is the index of the lowest-power state, not the 2508 * number of states. 2509 */ 2510 for (state = (int)ctrl->npss; state >= 0; state--) { 2511 u64 total_latency_us, exit_latency_us, transition_ms; 2512 2513 if (target) 2514 table->entries[state] = target; 2515 2516 /* 2517 * Don't allow transitions to the deepest state if it's quirked 2518 * off. 2519 */ 2520 if (state == ctrl->npss && 2521 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2522 continue; 2523 2524 /* 2525 * Is this state a useful non-operational state for higher-power 2526 * states to autonomously transition to? 2527 */ 2528 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) 2529 continue; 2530 2531 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2532 if (exit_latency_us > ctrl->ps_max_latency_us) 2533 continue; 2534 2535 total_latency_us = exit_latency_us + 2536 le32_to_cpu(ctrl->psd[state].entry_lat); 2537 2538 /* 2539 * This state is good. It can be used as the APST idle target 2540 * for higher power states. 2541 */ 2542 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { 2543 if (!nvme_apst_get_transition_time(total_latency_us, 2544 &transition_ms, &last_lt_index)) 2545 continue; 2546 } else { 2547 transition_ms = total_latency_us + 19; 2548 do_div(transition_ms, 20); 2549 if (transition_ms > (1 << 24) - 1) 2550 transition_ms = (1 << 24) - 1; 2551 } 2552 2553 target = cpu_to_le64((state << 3) | (transition_ms << 8)); 2554 if (max_ps == -1) 2555 max_ps = state; 2556 if (total_latency_us > max_lat_us) 2557 max_lat_us = total_latency_us; 2558 } 2559 2560 if (max_ps == -1) 2561 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2562 else 2563 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2564 max_ps, max_lat_us, (int)sizeof(*table), table); 2565 apste = 1; 2566 2567 done: 2568 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2569 table, sizeof(*table), NULL); 2570 if (ret) 2571 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2572 kfree(table); 2573 return ret; 2574 } 2575 2576 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2577 { 2578 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2579 u64 latency; 2580 2581 switch (val) { 2582 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2583 case PM_QOS_LATENCY_ANY: 2584 latency = U64_MAX; 2585 break; 2586 2587 default: 2588 latency = val; 2589 } 2590 2591 if (ctrl->ps_max_latency_us != latency) { 2592 ctrl->ps_max_latency_us = latency; 2593 if (ctrl->state == NVME_CTRL_LIVE) 2594 nvme_configure_apst(ctrl); 2595 } 2596 } 2597 2598 struct nvme_core_quirk_entry { 2599 /* 2600 * NVMe model and firmware strings are padded with spaces. For 2601 * simplicity, strings in the quirk table are padded with NULLs 2602 * instead. 2603 */ 2604 u16 vid; 2605 const char *mn; 2606 const char *fr; 2607 unsigned long quirks; 2608 }; 2609 2610 static const struct nvme_core_quirk_entry core_quirks[] = { 2611 { 2612 /* 2613 * This Toshiba device seems to die using any APST states. See: 2614 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2615 */ 2616 .vid = 0x1179, 2617 .mn = "THNSF5256GPUK TOSHIBA", 2618 .quirks = NVME_QUIRK_NO_APST, 2619 }, 2620 { 2621 /* 2622 * This LiteON CL1-3D*-Q11 firmware version has a race 2623 * condition associated with actions related to suspend to idle 2624 * LiteON has resolved the problem in future firmware 2625 */ 2626 .vid = 0x14a4, 2627 .fr = "22301111", 2628 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2629 }, 2630 { 2631 /* 2632 * This Kioxia CD6-V Series / HPE PE8030 device times out and 2633 * aborts I/O during any load, but more easily reproducible 2634 * with discards (fstrim). 2635 * 2636 * The device is left in a state where it is also not possible 2637 * to use "nvme set-feature" to disable APST, but booting with 2638 * nvme_core.default_ps_max_latency=0 works. 2639 */ 2640 .vid = 0x1e0f, 2641 .mn = "KCD6XVUL6T40", 2642 .quirks = NVME_QUIRK_NO_APST, 2643 }, 2644 { 2645 /* 2646 * The external Samsung X5 SSD fails initialization without a 2647 * delay before checking if it is ready and has a whole set of 2648 * other problems. To make this even more interesting, it 2649 * shares the PCI ID with internal Samsung 970 Evo Plus that 2650 * does not need or want these quirks. 2651 */ 2652 .vid = 0x144d, 2653 .mn = "Samsung Portable SSD X5", 2654 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 2655 NVME_QUIRK_NO_DEEPEST_PS | 2656 NVME_QUIRK_IGNORE_DEV_SUBNQN, 2657 } 2658 }; 2659 2660 /* match is null-terminated but idstr is space-padded. */ 2661 static bool string_matches(const char *idstr, const char *match, size_t len) 2662 { 2663 size_t matchlen; 2664 2665 if (!match) 2666 return true; 2667 2668 matchlen = strlen(match); 2669 WARN_ON_ONCE(matchlen > len); 2670 2671 if (memcmp(idstr, match, matchlen)) 2672 return false; 2673 2674 for (; matchlen < len; matchlen++) 2675 if (idstr[matchlen] != ' ') 2676 return false; 2677 2678 return true; 2679 } 2680 2681 static bool quirk_matches(const struct nvme_id_ctrl *id, 2682 const struct nvme_core_quirk_entry *q) 2683 { 2684 return q->vid == le16_to_cpu(id->vid) && 2685 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2686 string_matches(id->fr, q->fr, sizeof(id->fr)); 2687 } 2688 2689 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2690 struct nvme_id_ctrl *id) 2691 { 2692 size_t nqnlen; 2693 int off; 2694 2695 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2696 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2697 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2698 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2699 return; 2700 } 2701 2702 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2703 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2704 } 2705 2706 /* 2707 * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe 2708 * Base Specification 2.0. It is slightly different from the format 2709 * specified there due to historic reasons, and we can't change it now. 2710 */ 2711 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2712 "nqn.2014.08.org.nvmexpress:%04x%04x", 2713 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2714 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2715 off += sizeof(id->sn); 2716 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2717 off += sizeof(id->mn); 2718 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2719 } 2720 2721 static void nvme_release_subsystem(struct device *dev) 2722 { 2723 struct nvme_subsystem *subsys = 2724 container_of(dev, struct nvme_subsystem, dev); 2725 2726 if (subsys->instance >= 0) 2727 ida_free(&nvme_instance_ida, subsys->instance); 2728 kfree(subsys); 2729 } 2730 2731 static void nvme_destroy_subsystem(struct kref *ref) 2732 { 2733 struct nvme_subsystem *subsys = 2734 container_of(ref, struct nvme_subsystem, ref); 2735 2736 mutex_lock(&nvme_subsystems_lock); 2737 list_del(&subsys->entry); 2738 mutex_unlock(&nvme_subsystems_lock); 2739 2740 ida_destroy(&subsys->ns_ida); 2741 device_del(&subsys->dev); 2742 put_device(&subsys->dev); 2743 } 2744 2745 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2746 { 2747 kref_put(&subsys->ref, nvme_destroy_subsystem); 2748 } 2749 2750 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2751 { 2752 struct nvme_subsystem *subsys; 2753 2754 lockdep_assert_held(&nvme_subsystems_lock); 2755 2756 /* 2757 * Fail matches for discovery subsystems. This results 2758 * in each discovery controller bound to a unique subsystem. 2759 * This avoids issues with validating controller values 2760 * that can only be true when there is a single unique subsystem. 2761 * There may be multiple and completely independent entities 2762 * that provide discovery controllers. 2763 */ 2764 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2765 return NULL; 2766 2767 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2768 if (strcmp(subsys->subnqn, subsysnqn)) 2769 continue; 2770 if (!kref_get_unless_zero(&subsys->ref)) 2771 continue; 2772 return subsys; 2773 } 2774 2775 return NULL; 2776 } 2777 2778 #define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2779 struct device_attribute subsys_attr_##_name = \ 2780 __ATTR(_name, _mode, _show, NULL) 2781 2782 static ssize_t nvme_subsys_show_nqn(struct device *dev, 2783 struct device_attribute *attr, 2784 char *buf) 2785 { 2786 struct nvme_subsystem *subsys = 2787 container_of(dev, struct nvme_subsystem, dev); 2788 2789 return sysfs_emit(buf, "%s\n", subsys->subnqn); 2790 } 2791 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2792 2793 static ssize_t nvme_subsys_show_type(struct device *dev, 2794 struct device_attribute *attr, 2795 char *buf) 2796 { 2797 struct nvme_subsystem *subsys = 2798 container_of(dev, struct nvme_subsystem, dev); 2799 2800 switch (subsys->subtype) { 2801 case NVME_NQN_DISC: 2802 return sysfs_emit(buf, "discovery\n"); 2803 case NVME_NQN_NVME: 2804 return sysfs_emit(buf, "nvm\n"); 2805 default: 2806 return sysfs_emit(buf, "reserved\n"); 2807 } 2808 } 2809 static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type); 2810 2811 #define nvme_subsys_show_str_function(field) \ 2812 static ssize_t subsys_##field##_show(struct device *dev, \ 2813 struct device_attribute *attr, char *buf) \ 2814 { \ 2815 struct nvme_subsystem *subsys = \ 2816 container_of(dev, struct nvme_subsystem, dev); \ 2817 return sysfs_emit(buf, "%.*s\n", \ 2818 (int)sizeof(subsys->field), subsys->field); \ 2819 } \ 2820 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2821 2822 nvme_subsys_show_str_function(model); 2823 nvme_subsys_show_str_function(serial); 2824 nvme_subsys_show_str_function(firmware_rev); 2825 2826 static struct attribute *nvme_subsys_attrs[] = { 2827 &subsys_attr_model.attr, 2828 &subsys_attr_serial.attr, 2829 &subsys_attr_firmware_rev.attr, 2830 &subsys_attr_subsysnqn.attr, 2831 &subsys_attr_subsystype.attr, 2832 #ifdef CONFIG_NVME_MULTIPATH 2833 &subsys_attr_iopolicy.attr, 2834 #endif 2835 NULL, 2836 }; 2837 2838 static const struct attribute_group nvme_subsys_attrs_group = { 2839 .attrs = nvme_subsys_attrs, 2840 }; 2841 2842 static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2843 &nvme_subsys_attrs_group, 2844 NULL, 2845 }; 2846 2847 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) 2848 { 2849 return ctrl->opts && ctrl->opts->discovery_nqn; 2850 } 2851 2852 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2853 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2854 { 2855 struct nvme_ctrl *tmp; 2856 2857 lockdep_assert_held(&nvme_subsystems_lock); 2858 2859 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2860 if (nvme_state_terminal(tmp)) 2861 continue; 2862 2863 if (tmp->cntlid == ctrl->cntlid) { 2864 dev_err(ctrl->device, 2865 "Duplicate cntlid %u with %s, subsys %s, rejecting\n", 2866 ctrl->cntlid, dev_name(tmp->device), 2867 subsys->subnqn); 2868 return false; 2869 } 2870 2871 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 2872 nvme_discovery_ctrl(ctrl)) 2873 continue; 2874 2875 dev_err(ctrl->device, 2876 "Subsystem does not support multiple controllers\n"); 2877 return false; 2878 } 2879 2880 return true; 2881 } 2882 2883 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2884 { 2885 struct nvme_subsystem *subsys, *found; 2886 int ret; 2887 2888 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2889 if (!subsys) 2890 return -ENOMEM; 2891 2892 subsys->instance = -1; 2893 mutex_init(&subsys->lock); 2894 kref_init(&subsys->ref); 2895 INIT_LIST_HEAD(&subsys->ctrls); 2896 INIT_LIST_HEAD(&subsys->nsheads); 2897 nvme_init_subnqn(subsys, ctrl, id); 2898 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2899 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2900 subsys->vendor_id = le16_to_cpu(id->vid); 2901 subsys->cmic = id->cmic; 2902 2903 /* Versions prior to 1.4 don't necessarily report a valid type */ 2904 if (id->cntrltype == NVME_CTRL_DISC || 2905 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) 2906 subsys->subtype = NVME_NQN_DISC; 2907 else 2908 subsys->subtype = NVME_NQN_NVME; 2909 2910 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { 2911 dev_err(ctrl->device, 2912 "Subsystem %s is not a discovery controller", 2913 subsys->subnqn); 2914 kfree(subsys); 2915 return -EINVAL; 2916 } 2917 subsys->awupf = le16_to_cpu(id->awupf); 2918 nvme_mpath_default_iopolicy(subsys); 2919 2920 subsys->dev.class = nvme_subsys_class; 2921 subsys->dev.release = nvme_release_subsystem; 2922 subsys->dev.groups = nvme_subsys_attrs_groups; 2923 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 2924 device_initialize(&subsys->dev); 2925 2926 mutex_lock(&nvme_subsystems_lock); 2927 found = __nvme_find_get_subsystem(subsys->subnqn); 2928 if (found) { 2929 put_device(&subsys->dev); 2930 subsys = found; 2931 2932 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2933 ret = -EINVAL; 2934 goto out_put_subsystem; 2935 } 2936 } else { 2937 ret = device_add(&subsys->dev); 2938 if (ret) { 2939 dev_err(ctrl->device, 2940 "failed to register subsystem device.\n"); 2941 put_device(&subsys->dev); 2942 goto out_unlock; 2943 } 2944 ida_init(&subsys->ns_ida); 2945 list_add_tail(&subsys->entry, &nvme_subsystems); 2946 } 2947 2948 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2949 dev_name(ctrl->device)); 2950 if (ret) { 2951 dev_err(ctrl->device, 2952 "failed to create sysfs link from subsystem.\n"); 2953 goto out_put_subsystem; 2954 } 2955 2956 if (!found) 2957 subsys->instance = ctrl->instance; 2958 ctrl->subsys = subsys; 2959 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2960 mutex_unlock(&nvme_subsystems_lock); 2961 return 0; 2962 2963 out_put_subsystem: 2964 nvme_put_subsystem(subsys); 2965 out_unlock: 2966 mutex_unlock(&nvme_subsystems_lock); 2967 return ret; 2968 } 2969 2970 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, 2971 void *log, size_t size, u64 offset) 2972 { 2973 struct nvme_command c = { }; 2974 u32 dwlen = nvme_bytes_to_numd(size); 2975 2976 c.get_log_page.opcode = nvme_admin_get_log_page; 2977 c.get_log_page.nsid = cpu_to_le32(nsid); 2978 c.get_log_page.lid = log_page; 2979 c.get_log_page.lsp = lsp; 2980 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2981 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2982 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2983 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2984 c.get_log_page.csi = csi; 2985 2986 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2987 } 2988 2989 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, 2990 struct nvme_effects_log **log) 2991 { 2992 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); 2993 int ret; 2994 2995 if (cel) 2996 goto out; 2997 2998 cel = kzalloc(sizeof(*cel), GFP_KERNEL); 2999 if (!cel) 3000 return -ENOMEM; 3001 3002 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, 3003 cel, sizeof(*cel), 0); 3004 if (ret) { 3005 kfree(cel); 3006 return ret; 3007 } 3008 3009 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); 3010 out: 3011 *log = cel; 3012 return 0; 3013 } 3014 3015 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units) 3016 { 3017 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; 3018 3019 if (check_shl_overflow(1U, units + page_shift - 9, &val)) 3020 return UINT_MAX; 3021 return val; 3022 } 3023 3024 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) 3025 { 3026 struct nvme_command c = { }; 3027 struct nvme_id_ctrl_nvm *id; 3028 int ret; 3029 3030 if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { 3031 ctrl->max_discard_sectors = UINT_MAX; 3032 ctrl->max_discard_segments = NVME_DSM_MAX_RANGES; 3033 } else { 3034 ctrl->max_discard_sectors = 0; 3035 ctrl->max_discard_segments = 0; 3036 } 3037 3038 /* 3039 * Even though NVMe spec explicitly states that MDTS is not applicable 3040 * to the write-zeroes, we are cautious and limit the size to the 3041 * controllers max_hw_sectors value, which is based on the MDTS field 3042 * and possibly other limiting factors. 3043 */ 3044 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && 3045 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) 3046 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors; 3047 else 3048 ctrl->max_zeroes_sectors = 0; 3049 3050 if (nvme_ctrl_limited_cns(ctrl)) 3051 return 0; 3052 3053 id = kzalloc(sizeof(*id), GFP_KERNEL); 3054 if (!id) 3055 return -ENOMEM; 3056 3057 c.identify.opcode = nvme_admin_identify; 3058 c.identify.cns = NVME_ID_CNS_CS_CTRL; 3059 c.identify.csi = NVME_CSI_NVM; 3060 3061 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 3062 if (ret) 3063 goto free_data; 3064 3065 if (id->dmrl) 3066 ctrl->max_discard_segments = id->dmrl; 3067 ctrl->dmrsl = le32_to_cpu(id->dmrsl); 3068 if (id->wzsl) 3069 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); 3070 3071 free_data: 3072 kfree(id); 3073 return ret; 3074 } 3075 3076 static int nvme_init_identify(struct nvme_ctrl *ctrl) 3077 { 3078 struct nvme_id_ctrl *id; 3079 u32 max_hw_sectors; 3080 bool prev_apst_enabled; 3081 int ret; 3082 3083 ret = nvme_identify_ctrl(ctrl, &id); 3084 if (ret) { 3085 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 3086 return -EIO; 3087 } 3088 3089 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 3090 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); 3091 if (ret < 0) 3092 goto out_free; 3093 } 3094 3095 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 3096 ctrl->cntlid = le16_to_cpu(id->cntlid); 3097 3098 if (!ctrl->identified) { 3099 unsigned int i; 3100 3101 /* 3102 * Check for quirks. Quirk can depend on firmware version, 3103 * so, in principle, the set of quirks present can change 3104 * across a reset. As a possible future enhancement, we 3105 * could re-scan for quirks every time we reinitialize 3106 * the device, but we'd have to make sure that the driver 3107 * behaves intelligently if the quirks change. 3108 */ 3109 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 3110 if (quirk_matches(id, &core_quirks[i])) 3111 ctrl->quirks |= core_quirks[i].quirks; 3112 } 3113 3114 ret = nvme_init_subsystem(ctrl, id); 3115 if (ret) 3116 goto out_free; 3117 } 3118 memcpy(ctrl->subsys->firmware_rev, id->fr, 3119 sizeof(ctrl->subsys->firmware_rev)); 3120 3121 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 3122 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 3123 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 3124 } 3125 3126 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 3127 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 3128 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 3129 3130 ctrl->oacs = le16_to_cpu(id->oacs); 3131 ctrl->oncs = le16_to_cpu(id->oncs); 3132 ctrl->mtfa = le16_to_cpu(id->mtfa); 3133 ctrl->oaes = le32_to_cpu(id->oaes); 3134 ctrl->wctemp = le16_to_cpu(id->wctemp); 3135 ctrl->cctemp = le16_to_cpu(id->cctemp); 3136 3137 atomic_set(&ctrl->abort_limit, id->acl + 1); 3138 ctrl->vwc = id->vwc; 3139 if (id->mdts) 3140 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); 3141 else 3142 max_hw_sectors = UINT_MAX; 3143 ctrl->max_hw_sectors = 3144 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 3145 3146 nvme_set_queue_limits(ctrl, ctrl->admin_q); 3147 ctrl->sgls = le32_to_cpu(id->sgls); 3148 ctrl->kas = le16_to_cpu(id->kas); 3149 ctrl->max_namespaces = le32_to_cpu(id->mnan); 3150 ctrl->ctratt = le32_to_cpu(id->ctratt); 3151 3152 ctrl->cntrltype = id->cntrltype; 3153 ctrl->dctype = id->dctype; 3154 3155 if (id->rtd3e) { 3156 /* us -> s */ 3157 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; 3158 3159 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 3160 shutdown_timeout, 60); 3161 3162 if (ctrl->shutdown_timeout != shutdown_timeout) 3163 dev_info(ctrl->device, 3164 "Shutdown timeout set to %u seconds\n", 3165 ctrl->shutdown_timeout); 3166 } else 3167 ctrl->shutdown_timeout = shutdown_timeout; 3168 3169 ctrl->npss = id->npss; 3170 ctrl->apsta = id->apsta; 3171 prev_apst_enabled = ctrl->apst_enabled; 3172 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 3173 if (force_apst && id->apsta) { 3174 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 3175 ctrl->apst_enabled = true; 3176 } else { 3177 ctrl->apst_enabled = false; 3178 } 3179 } else { 3180 ctrl->apst_enabled = id->apsta; 3181 } 3182 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 3183 3184 if (ctrl->ops->flags & NVME_F_FABRICS) { 3185 ctrl->icdoff = le16_to_cpu(id->icdoff); 3186 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 3187 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 3188 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 3189 3190 /* 3191 * In fabrics we need to verify the cntlid matches the 3192 * admin connect 3193 */ 3194 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 3195 dev_err(ctrl->device, 3196 "Mismatching cntlid: Connect %u vs Identify " 3197 "%u, rejecting\n", 3198 ctrl->cntlid, le16_to_cpu(id->cntlid)); 3199 ret = -EINVAL; 3200 goto out_free; 3201 } 3202 3203 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { 3204 dev_err(ctrl->device, 3205 "keep-alive support is mandatory for fabrics\n"); 3206 ret = -EINVAL; 3207 goto out_free; 3208 } 3209 } else { 3210 ctrl->hmpre = le32_to_cpu(id->hmpre); 3211 ctrl->hmmin = le32_to_cpu(id->hmmin); 3212 ctrl->hmminds = le32_to_cpu(id->hmminds); 3213 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 3214 } 3215 3216 ret = nvme_mpath_init_identify(ctrl, id); 3217 if (ret < 0) 3218 goto out_free; 3219 3220 if (ctrl->apst_enabled && !prev_apst_enabled) 3221 dev_pm_qos_expose_latency_tolerance(ctrl->device); 3222 else if (!ctrl->apst_enabled && prev_apst_enabled) 3223 dev_pm_qos_hide_latency_tolerance(ctrl->device); 3224 3225 out_free: 3226 kfree(id); 3227 return ret; 3228 } 3229 3230 /* 3231 * Initialize the cached copies of the Identify data and various controller 3232 * register in our nvme_ctrl structure. This should be called as soon as 3233 * the admin queue is fully up and running. 3234 */ 3235 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) 3236 { 3237 int ret; 3238 3239 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 3240 if (ret) { 3241 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 3242 return ret; 3243 } 3244 3245 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 3246 3247 if (ctrl->vs >= NVME_VS(1, 1, 0)) 3248 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 3249 3250 ret = nvme_init_identify(ctrl); 3251 if (ret) 3252 return ret; 3253 3254 ret = nvme_configure_apst(ctrl); 3255 if (ret < 0) 3256 return ret; 3257 3258 ret = nvme_configure_timestamp(ctrl); 3259 if (ret < 0) 3260 return ret; 3261 3262 ret = nvme_configure_host_options(ctrl); 3263 if (ret < 0) 3264 return ret; 3265 3266 nvme_configure_opal(ctrl, was_suspended); 3267 3268 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { 3269 /* 3270 * Do not return errors unless we are in a controller reset, 3271 * the controller works perfectly fine without hwmon. 3272 */ 3273 ret = nvme_hwmon_init(ctrl); 3274 if (ret == -EINTR) 3275 return ret; 3276 } 3277 3278 ctrl->identified = true; 3279 3280 return 0; 3281 } 3282 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); 3283 3284 static int nvme_dev_open(struct inode *inode, struct file *file) 3285 { 3286 struct nvme_ctrl *ctrl = 3287 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3288 3289 switch (ctrl->state) { 3290 case NVME_CTRL_LIVE: 3291 break; 3292 default: 3293 return -EWOULDBLOCK; 3294 } 3295 3296 nvme_get_ctrl(ctrl); 3297 if (!try_module_get(ctrl->ops->module)) { 3298 nvme_put_ctrl(ctrl); 3299 return -EINVAL; 3300 } 3301 3302 file->private_data = ctrl; 3303 return 0; 3304 } 3305 3306 static int nvme_dev_release(struct inode *inode, struct file *file) 3307 { 3308 struct nvme_ctrl *ctrl = 3309 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3310 3311 module_put(ctrl->ops->module); 3312 nvme_put_ctrl(ctrl); 3313 return 0; 3314 } 3315 3316 static const struct file_operations nvme_dev_fops = { 3317 .owner = THIS_MODULE, 3318 .open = nvme_dev_open, 3319 .release = nvme_dev_release, 3320 .unlocked_ioctl = nvme_dev_ioctl, 3321 .compat_ioctl = compat_ptr_ioctl, 3322 .uring_cmd = nvme_dev_uring_cmd, 3323 }; 3324 3325 static ssize_t nvme_sysfs_reset(struct device *dev, 3326 struct device_attribute *attr, const char *buf, 3327 size_t count) 3328 { 3329 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3330 int ret; 3331 3332 ret = nvme_reset_ctrl_sync(ctrl); 3333 if (ret < 0) 3334 return ret; 3335 return count; 3336 } 3337 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 3338 3339 static ssize_t nvme_sysfs_rescan(struct device *dev, 3340 struct device_attribute *attr, const char *buf, 3341 size_t count) 3342 { 3343 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3344 3345 nvme_queue_scan(ctrl); 3346 return count; 3347 } 3348 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 3349 3350 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 3351 { 3352 struct gendisk *disk = dev_to_disk(dev); 3353 3354 if (disk->fops == &nvme_bdev_ops) 3355 return nvme_get_ns_from_dev(dev)->head; 3356 else 3357 return disk->private_data; 3358 } 3359 3360 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 3361 char *buf) 3362 { 3363 struct nvme_ns_head *head = dev_to_ns_head(dev); 3364 struct nvme_ns_ids *ids = &head->ids; 3365 struct nvme_subsystem *subsys = head->subsys; 3366 int serial_len = sizeof(subsys->serial); 3367 int model_len = sizeof(subsys->model); 3368 3369 if (!uuid_is_null(&ids->uuid)) 3370 return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid); 3371 3372 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3373 return sysfs_emit(buf, "eui.%16phN\n", ids->nguid); 3374 3375 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3376 return sysfs_emit(buf, "eui.%8phN\n", ids->eui64); 3377 3378 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 3379 subsys->serial[serial_len - 1] == '\0')) 3380 serial_len--; 3381 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 3382 subsys->model[model_len - 1] == '\0')) 3383 model_len--; 3384 3385 return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 3386 serial_len, subsys->serial, model_len, subsys->model, 3387 head->ns_id); 3388 } 3389 static DEVICE_ATTR_RO(wwid); 3390 3391 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 3392 char *buf) 3393 { 3394 return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 3395 } 3396 static DEVICE_ATTR_RO(nguid); 3397 3398 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 3399 char *buf) 3400 { 3401 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3402 3403 /* For backward compatibility expose the NGUID to userspace if 3404 * we have no UUID set 3405 */ 3406 if (uuid_is_null(&ids->uuid)) { 3407 dev_warn_ratelimited(dev, 3408 "No UUID available providing old NGUID\n"); 3409 return sysfs_emit(buf, "%pU\n", ids->nguid); 3410 } 3411 return sysfs_emit(buf, "%pU\n", &ids->uuid); 3412 } 3413 static DEVICE_ATTR_RO(uuid); 3414 3415 static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 3416 char *buf) 3417 { 3418 return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 3419 } 3420 static DEVICE_ATTR_RO(eui); 3421 3422 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 3423 char *buf) 3424 { 3425 return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id); 3426 } 3427 static DEVICE_ATTR_RO(nsid); 3428 3429 static struct attribute *nvme_ns_id_attrs[] = { 3430 &dev_attr_wwid.attr, 3431 &dev_attr_uuid.attr, 3432 &dev_attr_nguid.attr, 3433 &dev_attr_eui.attr, 3434 &dev_attr_nsid.attr, 3435 #ifdef CONFIG_NVME_MULTIPATH 3436 &dev_attr_ana_grpid.attr, 3437 &dev_attr_ana_state.attr, 3438 #endif 3439 NULL, 3440 }; 3441 3442 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 3443 struct attribute *a, int n) 3444 { 3445 struct device *dev = container_of(kobj, struct device, kobj); 3446 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3447 3448 if (a == &dev_attr_uuid.attr) { 3449 if (uuid_is_null(&ids->uuid) && 3450 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3451 return 0; 3452 } 3453 if (a == &dev_attr_nguid.attr) { 3454 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3455 return 0; 3456 } 3457 if (a == &dev_attr_eui.attr) { 3458 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3459 return 0; 3460 } 3461 #ifdef CONFIG_NVME_MULTIPATH 3462 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { 3463 if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */ 3464 return 0; 3465 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) 3466 return 0; 3467 } 3468 #endif 3469 return a->mode; 3470 } 3471 3472 static const struct attribute_group nvme_ns_id_attr_group = { 3473 .attrs = nvme_ns_id_attrs, 3474 .is_visible = nvme_ns_id_attrs_are_visible, 3475 }; 3476 3477 const struct attribute_group *nvme_ns_id_attr_groups[] = { 3478 &nvme_ns_id_attr_group, 3479 NULL, 3480 }; 3481 3482 #define nvme_show_str_function(field) \ 3483 static ssize_t field##_show(struct device *dev, \ 3484 struct device_attribute *attr, char *buf) \ 3485 { \ 3486 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3487 return sysfs_emit(buf, "%.*s\n", \ 3488 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 3489 } \ 3490 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3491 3492 nvme_show_str_function(model); 3493 nvme_show_str_function(serial); 3494 nvme_show_str_function(firmware_rev); 3495 3496 #define nvme_show_int_function(field) \ 3497 static ssize_t field##_show(struct device *dev, \ 3498 struct device_attribute *attr, char *buf) \ 3499 { \ 3500 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3501 return sysfs_emit(buf, "%d\n", ctrl->field); \ 3502 } \ 3503 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3504 3505 nvme_show_int_function(cntlid); 3506 nvme_show_int_function(numa_node); 3507 nvme_show_int_function(queue_count); 3508 nvme_show_int_function(sqsize); 3509 nvme_show_int_function(kato); 3510 3511 static ssize_t nvme_sysfs_delete(struct device *dev, 3512 struct device_attribute *attr, const char *buf, 3513 size_t count) 3514 { 3515 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3516 3517 if (device_remove_file_self(dev, attr)) 3518 nvme_delete_ctrl_sync(ctrl); 3519 return count; 3520 } 3521 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 3522 3523 static ssize_t nvme_sysfs_show_transport(struct device *dev, 3524 struct device_attribute *attr, 3525 char *buf) 3526 { 3527 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3528 3529 return sysfs_emit(buf, "%s\n", ctrl->ops->name); 3530 } 3531 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 3532 3533 static ssize_t nvme_sysfs_show_state(struct device *dev, 3534 struct device_attribute *attr, 3535 char *buf) 3536 { 3537 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3538 static const char *const state_name[] = { 3539 [NVME_CTRL_NEW] = "new", 3540 [NVME_CTRL_LIVE] = "live", 3541 [NVME_CTRL_RESETTING] = "resetting", 3542 [NVME_CTRL_CONNECTING] = "connecting", 3543 [NVME_CTRL_DELETING] = "deleting", 3544 [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)", 3545 [NVME_CTRL_DEAD] = "dead", 3546 }; 3547 3548 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 3549 state_name[ctrl->state]) 3550 return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); 3551 3552 return sysfs_emit(buf, "unknown state\n"); 3553 } 3554 3555 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 3556 3557 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 3558 struct device_attribute *attr, 3559 char *buf) 3560 { 3561 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3562 3563 return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn); 3564 } 3565 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 3566 3567 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, 3568 struct device_attribute *attr, 3569 char *buf) 3570 { 3571 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3572 3573 return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn); 3574 } 3575 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); 3576 3577 static ssize_t nvme_sysfs_show_hostid(struct device *dev, 3578 struct device_attribute *attr, 3579 char *buf) 3580 { 3581 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3582 3583 return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id); 3584 } 3585 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); 3586 3587 static ssize_t nvme_sysfs_show_address(struct device *dev, 3588 struct device_attribute *attr, 3589 char *buf) 3590 { 3591 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3592 3593 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 3594 } 3595 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 3596 3597 static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev, 3598 struct device_attribute *attr, char *buf) 3599 { 3600 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3601 struct nvmf_ctrl_options *opts = ctrl->opts; 3602 3603 if (ctrl->opts->max_reconnects == -1) 3604 return sysfs_emit(buf, "off\n"); 3605 return sysfs_emit(buf, "%d\n", 3606 opts->max_reconnects * opts->reconnect_delay); 3607 } 3608 3609 static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev, 3610 struct device_attribute *attr, const char *buf, size_t count) 3611 { 3612 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3613 struct nvmf_ctrl_options *opts = ctrl->opts; 3614 int ctrl_loss_tmo, err; 3615 3616 err = kstrtoint(buf, 10, &ctrl_loss_tmo); 3617 if (err) 3618 return -EINVAL; 3619 3620 if (ctrl_loss_tmo < 0) 3621 opts->max_reconnects = -1; 3622 else 3623 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, 3624 opts->reconnect_delay); 3625 return count; 3626 } 3627 static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR, 3628 nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store); 3629 3630 static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev, 3631 struct device_attribute *attr, char *buf) 3632 { 3633 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3634 3635 if (ctrl->opts->reconnect_delay == -1) 3636 return sysfs_emit(buf, "off\n"); 3637 return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay); 3638 } 3639 3640 static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev, 3641 struct device_attribute *attr, const char *buf, size_t count) 3642 { 3643 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3644 unsigned int v; 3645 int err; 3646 3647 err = kstrtou32(buf, 10, &v); 3648 if (err) 3649 return err; 3650 3651 ctrl->opts->reconnect_delay = v; 3652 return count; 3653 } 3654 static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, 3655 nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store); 3656 3657 static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev, 3658 struct device_attribute *attr, char *buf) 3659 { 3660 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3661 3662 if (ctrl->opts->fast_io_fail_tmo == -1) 3663 return sysfs_emit(buf, "off\n"); 3664 return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo); 3665 } 3666 3667 static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev, 3668 struct device_attribute *attr, const char *buf, size_t count) 3669 { 3670 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3671 struct nvmf_ctrl_options *opts = ctrl->opts; 3672 int fast_io_fail_tmo, err; 3673 3674 err = kstrtoint(buf, 10, &fast_io_fail_tmo); 3675 if (err) 3676 return -EINVAL; 3677 3678 if (fast_io_fail_tmo < 0) 3679 opts->fast_io_fail_tmo = -1; 3680 else 3681 opts->fast_io_fail_tmo = fast_io_fail_tmo; 3682 return count; 3683 } 3684 static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, 3685 nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); 3686 3687 static ssize_t cntrltype_show(struct device *dev, 3688 struct device_attribute *attr, char *buf) 3689 { 3690 static const char * const type[] = { 3691 [NVME_CTRL_IO] = "io\n", 3692 [NVME_CTRL_DISC] = "discovery\n", 3693 [NVME_CTRL_ADMIN] = "admin\n", 3694 }; 3695 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3696 3697 if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype]) 3698 return sysfs_emit(buf, "reserved\n"); 3699 3700 return sysfs_emit(buf, type[ctrl->cntrltype]); 3701 } 3702 static DEVICE_ATTR_RO(cntrltype); 3703 3704 static ssize_t dctype_show(struct device *dev, 3705 struct device_attribute *attr, char *buf) 3706 { 3707 static const char * const type[] = { 3708 [NVME_DCTYPE_NOT_REPORTED] = "none\n", 3709 [NVME_DCTYPE_DDC] = "ddc\n", 3710 [NVME_DCTYPE_CDC] = "cdc\n", 3711 }; 3712 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3713 3714 if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype]) 3715 return sysfs_emit(buf, "reserved\n"); 3716 3717 return sysfs_emit(buf, type[ctrl->dctype]); 3718 } 3719 static DEVICE_ATTR_RO(dctype); 3720 3721 #ifdef CONFIG_NVME_AUTH 3722 static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev, 3723 struct device_attribute *attr, char *buf) 3724 { 3725 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3726 struct nvmf_ctrl_options *opts = ctrl->opts; 3727 3728 if (!opts->dhchap_secret) 3729 return sysfs_emit(buf, "none\n"); 3730 return sysfs_emit(buf, "%s\n", opts->dhchap_secret); 3731 } 3732 3733 static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev, 3734 struct device_attribute *attr, const char *buf, size_t count) 3735 { 3736 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3737 struct nvmf_ctrl_options *opts = ctrl->opts; 3738 char *dhchap_secret; 3739 3740 if (!ctrl->opts->dhchap_secret) 3741 return -EINVAL; 3742 if (count < 7) 3743 return -EINVAL; 3744 if (memcmp(buf, "DHHC-1:", 7)) 3745 return -EINVAL; 3746 3747 dhchap_secret = kzalloc(count + 1, GFP_KERNEL); 3748 if (!dhchap_secret) 3749 return -ENOMEM; 3750 memcpy(dhchap_secret, buf, count); 3751 nvme_auth_stop(ctrl); 3752 if (strcmp(dhchap_secret, opts->dhchap_secret)) { 3753 struct nvme_dhchap_key *key, *host_key; 3754 int ret; 3755 3756 ret = nvme_auth_generate_key(dhchap_secret, &key); 3757 if (ret) 3758 return ret; 3759 kfree(opts->dhchap_secret); 3760 opts->dhchap_secret = dhchap_secret; 3761 host_key = ctrl->host_key; 3762 mutex_lock(&ctrl->dhchap_auth_mutex); 3763 ctrl->host_key = key; 3764 mutex_unlock(&ctrl->dhchap_auth_mutex); 3765 nvme_auth_free_key(host_key); 3766 } 3767 /* Start re-authentication */ 3768 dev_info(ctrl->device, "re-authenticating controller\n"); 3769 queue_work(nvme_wq, &ctrl->dhchap_auth_work); 3770 3771 return count; 3772 } 3773 static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR, 3774 nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store); 3775 3776 static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev, 3777 struct device_attribute *attr, char *buf) 3778 { 3779 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3780 struct nvmf_ctrl_options *opts = ctrl->opts; 3781 3782 if (!opts->dhchap_ctrl_secret) 3783 return sysfs_emit(buf, "none\n"); 3784 return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret); 3785 } 3786 3787 static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev, 3788 struct device_attribute *attr, const char *buf, size_t count) 3789 { 3790 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3791 struct nvmf_ctrl_options *opts = ctrl->opts; 3792 char *dhchap_secret; 3793 3794 if (!ctrl->opts->dhchap_ctrl_secret) 3795 return -EINVAL; 3796 if (count < 7) 3797 return -EINVAL; 3798 if (memcmp(buf, "DHHC-1:", 7)) 3799 return -EINVAL; 3800 3801 dhchap_secret = kzalloc(count + 1, GFP_KERNEL); 3802 if (!dhchap_secret) 3803 return -ENOMEM; 3804 memcpy(dhchap_secret, buf, count); 3805 nvme_auth_stop(ctrl); 3806 if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) { 3807 struct nvme_dhchap_key *key, *ctrl_key; 3808 int ret; 3809 3810 ret = nvme_auth_generate_key(dhchap_secret, &key); 3811 if (ret) 3812 return ret; 3813 kfree(opts->dhchap_ctrl_secret); 3814 opts->dhchap_ctrl_secret = dhchap_secret; 3815 ctrl_key = ctrl->ctrl_key; 3816 mutex_lock(&ctrl->dhchap_auth_mutex); 3817 ctrl->ctrl_key = key; 3818 mutex_unlock(&ctrl->dhchap_auth_mutex); 3819 nvme_auth_free_key(ctrl_key); 3820 } 3821 /* Start re-authentication */ 3822 dev_info(ctrl->device, "re-authenticating controller\n"); 3823 queue_work(nvme_wq, &ctrl->dhchap_auth_work); 3824 3825 return count; 3826 } 3827 static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, 3828 nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); 3829 #endif 3830 3831 static struct attribute *nvme_dev_attrs[] = { 3832 &dev_attr_reset_controller.attr, 3833 &dev_attr_rescan_controller.attr, 3834 &dev_attr_model.attr, 3835 &dev_attr_serial.attr, 3836 &dev_attr_firmware_rev.attr, 3837 &dev_attr_cntlid.attr, 3838 &dev_attr_delete_controller.attr, 3839 &dev_attr_transport.attr, 3840 &dev_attr_subsysnqn.attr, 3841 &dev_attr_address.attr, 3842 &dev_attr_state.attr, 3843 &dev_attr_numa_node.attr, 3844 &dev_attr_queue_count.attr, 3845 &dev_attr_sqsize.attr, 3846 &dev_attr_hostnqn.attr, 3847 &dev_attr_hostid.attr, 3848 &dev_attr_ctrl_loss_tmo.attr, 3849 &dev_attr_reconnect_delay.attr, 3850 &dev_attr_fast_io_fail_tmo.attr, 3851 &dev_attr_kato.attr, 3852 &dev_attr_cntrltype.attr, 3853 &dev_attr_dctype.attr, 3854 #ifdef CONFIG_NVME_AUTH 3855 &dev_attr_dhchap_secret.attr, 3856 &dev_attr_dhchap_ctrl_secret.attr, 3857 #endif 3858 NULL 3859 }; 3860 3861 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 3862 struct attribute *a, int n) 3863 { 3864 struct device *dev = container_of(kobj, struct device, kobj); 3865 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3866 3867 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 3868 return 0; 3869 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 3870 return 0; 3871 if (a == &dev_attr_hostnqn.attr && !ctrl->opts) 3872 return 0; 3873 if (a == &dev_attr_hostid.attr && !ctrl->opts) 3874 return 0; 3875 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) 3876 return 0; 3877 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) 3878 return 0; 3879 if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) 3880 return 0; 3881 #ifdef CONFIG_NVME_AUTH 3882 if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts) 3883 return 0; 3884 if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts) 3885 return 0; 3886 #endif 3887 3888 return a->mode; 3889 } 3890 3891 const struct attribute_group nvme_dev_attrs_group = { 3892 .attrs = nvme_dev_attrs, 3893 .is_visible = nvme_dev_attrs_are_visible, 3894 }; 3895 EXPORT_SYMBOL_GPL(nvme_dev_attrs_group); 3896 3897 static const struct attribute_group *nvme_dev_attr_groups[] = { 3898 &nvme_dev_attrs_group, 3899 NULL, 3900 }; 3901 3902 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, 3903 unsigned nsid) 3904 { 3905 struct nvme_ns_head *h; 3906 3907 lockdep_assert_held(&ctrl->subsys->lock); 3908 3909 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { 3910 /* 3911 * Private namespaces can share NSIDs under some conditions. 3912 * In that case we can't use the same ns_head for namespaces 3913 * with the same NSID. 3914 */ 3915 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) 3916 continue; 3917 if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) 3918 return h; 3919 } 3920 3921 return NULL; 3922 } 3923 3924 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, 3925 struct nvme_ns_ids *ids) 3926 { 3927 bool has_uuid = !uuid_is_null(&ids->uuid); 3928 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); 3929 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 3930 struct nvme_ns_head *h; 3931 3932 lockdep_assert_held(&subsys->lock); 3933 3934 list_for_each_entry(h, &subsys->nsheads, entry) { 3935 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) 3936 return -EINVAL; 3937 if (has_nguid && 3938 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) 3939 return -EINVAL; 3940 if (has_eui64 && 3941 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) 3942 return -EINVAL; 3943 } 3944 3945 return 0; 3946 } 3947 3948 static void nvme_cdev_rel(struct device *dev) 3949 { 3950 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); 3951 } 3952 3953 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) 3954 { 3955 cdev_device_del(cdev, cdev_device); 3956 put_device(cdev_device); 3957 } 3958 3959 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, 3960 const struct file_operations *fops, struct module *owner) 3961 { 3962 int minor, ret; 3963 3964 minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL); 3965 if (minor < 0) 3966 return minor; 3967 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); 3968 cdev_device->class = nvme_ns_chr_class; 3969 cdev_device->release = nvme_cdev_rel; 3970 device_initialize(cdev_device); 3971 cdev_init(cdev, fops); 3972 cdev->owner = owner; 3973 ret = cdev_device_add(cdev, cdev_device); 3974 if (ret) 3975 put_device(cdev_device); 3976 3977 return ret; 3978 } 3979 3980 static int nvme_ns_chr_open(struct inode *inode, struct file *file) 3981 { 3982 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3983 } 3984 3985 static int nvme_ns_chr_release(struct inode *inode, struct file *file) 3986 { 3987 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3988 return 0; 3989 } 3990 3991 static const struct file_operations nvme_ns_chr_fops = { 3992 .owner = THIS_MODULE, 3993 .open = nvme_ns_chr_open, 3994 .release = nvme_ns_chr_release, 3995 .unlocked_ioctl = nvme_ns_chr_ioctl, 3996 .compat_ioctl = compat_ptr_ioctl, 3997 .uring_cmd = nvme_ns_chr_uring_cmd, 3998 .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, 3999 }; 4000 4001 static int nvme_add_ns_cdev(struct nvme_ns *ns) 4002 { 4003 int ret; 4004 4005 ns->cdev_device.parent = ns->ctrl->device; 4006 ret = dev_set_name(&ns->cdev_device, "ng%dn%d", 4007 ns->ctrl->instance, ns->head->instance); 4008 if (ret) 4009 return ret; 4010 4011 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, 4012 ns->ctrl->ops->module); 4013 } 4014 4015 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 4016 struct nvme_ns_info *info) 4017 { 4018 struct nvme_ns_head *head; 4019 size_t size = sizeof(*head); 4020 int ret = -ENOMEM; 4021 4022 #ifdef CONFIG_NVME_MULTIPATH 4023 size += num_possible_nodes() * sizeof(struct nvme_ns *); 4024 #endif 4025 4026 head = kzalloc(size, GFP_KERNEL); 4027 if (!head) 4028 goto out; 4029 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL); 4030 if (ret < 0) 4031 goto out_free_head; 4032 head->instance = ret; 4033 INIT_LIST_HEAD(&head->list); 4034 ret = init_srcu_struct(&head->srcu); 4035 if (ret) 4036 goto out_ida_remove; 4037 head->subsys = ctrl->subsys; 4038 head->ns_id = info->nsid; 4039 head->ids = info->ids; 4040 head->shared = info->is_shared; 4041 kref_init(&head->ref); 4042 4043 if (head->ids.csi) { 4044 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); 4045 if (ret) 4046 goto out_cleanup_srcu; 4047 } else 4048 head->effects = ctrl->effects; 4049 4050 ret = nvme_mpath_alloc_disk(ctrl, head); 4051 if (ret) 4052 goto out_cleanup_srcu; 4053 4054 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 4055 4056 kref_get(&ctrl->subsys->ref); 4057 4058 return head; 4059 out_cleanup_srcu: 4060 cleanup_srcu_struct(&head->srcu); 4061 out_ida_remove: 4062 ida_free(&ctrl->subsys->ns_ida, head->instance); 4063 out_free_head: 4064 kfree(head); 4065 out: 4066 if (ret > 0) 4067 ret = blk_status_to_errno(nvme_error_status(ret)); 4068 return ERR_PTR(ret); 4069 } 4070 4071 static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this, 4072 struct nvme_ns_ids *ids) 4073 { 4074 struct nvme_subsystem *s; 4075 int ret = 0; 4076 4077 /* 4078 * Note that this check is racy as we try to avoid holding the global 4079 * lock over the whole ns_head creation. But it is only intended as 4080 * a sanity check anyway. 4081 */ 4082 mutex_lock(&nvme_subsystems_lock); 4083 list_for_each_entry(s, &nvme_subsystems, entry) { 4084 if (s == this) 4085 continue; 4086 mutex_lock(&s->lock); 4087 ret = nvme_subsys_check_duplicate_ids(s, ids); 4088 mutex_unlock(&s->lock); 4089 if (ret) 4090 break; 4091 } 4092 mutex_unlock(&nvme_subsystems_lock); 4093 4094 return ret; 4095 } 4096 4097 static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) 4098 { 4099 struct nvme_ctrl *ctrl = ns->ctrl; 4100 struct nvme_ns_head *head = NULL; 4101 int ret; 4102 4103 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids); 4104 if (ret) { 4105 dev_err(ctrl->device, 4106 "globally duplicate IDs for nsid %d\n", info->nsid); 4107 nvme_print_device_info(ctrl); 4108 return ret; 4109 } 4110 4111 mutex_lock(&ctrl->subsys->lock); 4112 head = nvme_find_ns_head(ctrl, info->nsid); 4113 if (!head) { 4114 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids); 4115 if (ret) { 4116 dev_err(ctrl->device, 4117 "duplicate IDs in subsystem for nsid %d\n", 4118 info->nsid); 4119 goto out_unlock; 4120 } 4121 head = nvme_alloc_ns_head(ctrl, info); 4122 if (IS_ERR(head)) { 4123 ret = PTR_ERR(head); 4124 goto out_unlock; 4125 } 4126 } else { 4127 ret = -EINVAL; 4128 if (!info->is_shared || !head->shared) { 4129 dev_err(ctrl->device, 4130 "Duplicate unshared namespace %d\n", 4131 info->nsid); 4132 goto out_put_ns_head; 4133 } 4134 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) { 4135 dev_err(ctrl->device, 4136 "IDs don't match for shared namespace %d\n", 4137 info->nsid); 4138 goto out_put_ns_head; 4139 } 4140 4141 if (!multipath && !list_empty(&head->list)) { 4142 dev_warn(ctrl->device, 4143 "Found shared namespace %d, but multipathing not supported.\n", 4144 info->nsid); 4145 dev_warn_once(ctrl->device, 4146 "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n."); 4147 } 4148 } 4149 4150 list_add_tail_rcu(&ns->siblings, &head->list); 4151 ns->head = head; 4152 mutex_unlock(&ctrl->subsys->lock); 4153 return 0; 4154 4155 out_put_ns_head: 4156 nvme_put_ns_head(head); 4157 out_unlock: 4158 mutex_unlock(&ctrl->subsys->lock); 4159 return ret; 4160 } 4161 4162 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 4163 { 4164 struct nvme_ns *ns, *ret = NULL; 4165 4166 down_read(&ctrl->namespaces_rwsem); 4167 list_for_each_entry(ns, &ctrl->namespaces, list) { 4168 if (ns->head->ns_id == nsid) { 4169 if (!nvme_get_ns(ns)) 4170 continue; 4171 ret = ns; 4172 break; 4173 } 4174 if (ns->head->ns_id > nsid) 4175 break; 4176 } 4177 up_read(&ctrl->namespaces_rwsem); 4178 return ret; 4179 } 4180 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); 4181 4182 /* 4183 * Add the namespace to the controller list while keeping the list ordered. 4184 */ 4185 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) 4186 { 4187 struct nvme_ns *tmp; 4188 4189 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { 4190 if (tmp->head->ns_id < ns->head->ns_id) { 4191 list_add(&ns->list, &tmp->list); 4192 return; 4193 } 4194 } 4195 list_add(&ns->list, &ns->ctrl->namespaces); 4196 } 4197 4198 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) 4199 { 4200 struct nvme_ns *ns; 4201 struct gendisk *disk; 4202 int node = ctrl->numa_node; 4203 4204 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 4205 if (!ns) 4206 return; 4207 4208 disk = blk_mq_alloc_disk(ctrl->tagset, ns); 4209 if (IS_ERR(disk)) 4210 goto out_free_ns; 4211 disk->fops = &nvme_bdev_ops; 4212 disk->private_data = ns; 4213 4214 ns->disk = disk; 4215 ns->queue = disk->queue; 4216 4217 if (ctrl->opts && ctrl->opts->data_digest) 4218 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); 4219 4220 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 4221 if (ctrl->ops->supports_pci_p2pdma && 4222 ctrl->ops->supports_pci_p2pdma(ctrl)) 4223 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 4224 4225 ns->ctrl = ctrl; 4226 kref_init(&ns->kref); 4227 4228 if (nvme_init_ns_head(ns, info)) 4229 goto out_cleanup_disk; 4230 4231 /* 4232 * If multipathing is enabled, the device name for all disks and not 4233 * just those that represent shared namespaces needs to be based on the 4234 * subsystem instance. Using the controller instance for private 4235 * namespaces could lead to naming collisions between shared and private 4236 * namespaces if they don't use a common numbering scheme. 4237 * 4238 * If multipathing is not enabled, disk names must use the controller 4239 * instance as shared namespaces will show up as multiple block 4240 * devices. 4241 */ 4242 if (ns->head->disk) { 4243 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, 4244 ctrl->instance, ns->head->instance); 4245 disk->flags |= GENHD_FL_HIDDEN; 4246 } else if (multipath) { 4247 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, 4248 ns->head->instance); 4249 } else { 4250 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, 4251 ns->head->instance); 4252 } 4253 4254 if (nvme_update_ns_info(ns, info)) 4255 goto out_unlink_ns; 4256 4257 down_write(&ctrl->namespaces_rwsem); 4258 nvme_ns_add_to_ctrl_list(ns); 4259 up_write(&ctrl->namespaces_rwsem); 4260 nvme_get_ctrl(ctrl); 4261 4262 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups)) 4263 goto out_cleanup_ns_from_list; 4264 4265 if (!nvme_ns_head_multipath(ns->head)) 4266 nvme_add_ns_cdev(ns); 4267 4268 nvme_mpath_add_disk(ns, info->anagrpid); 4269 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 4270 4271 return; 4272 4273 out_cleanup_ns_from_list: 4274 nvme_put_ctrl(ctrl); 4275 down_write(&ctrl->namespaces_rwsem); 4276 list_del_init(&ns->list); 4277 up_write(&ctrl->namespaces_rwsem); 4278 out_unlink_ns: 4279 mutex_lock(&ctrl->subsys->lock); 4280 list_del_rcu(&ns->siblings); 4281 if (list_empty(&ns->head->list)) 4282 list_del_init(&ns->head->entry); 4283 mutex_unlock(&ctrl->subsys->lock); 4284 nvme_put_ns_head(ns->head); 4285 out_cleanup_disk: 4286 put_disk(disk); 4287 out_free_ns: 4288 kfree(ns); 4289 } 4290 4291 static void nvme_ns_remove(struct nvme_ns *ns) 4292 { 4293 bool last_path = false; 4294 4295 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 4296 return; 4297 4298 clear_bit(NVME_NS_READY, &ns->flags); 4299 set_capacity(ns->disk, 0); 4300 nvme_fault_inject_fini(&ns->fault_inject); 4301 4302 /* 4303 * Ensure that !NVME_NS_READY is seen by other threads to prevent 4304 * this ns going back into current_path. 4305 */ 4306 synchronize_srcu(&ns->head->srcu); 4307 4308 /* wait for concurrent submissions */ 4309 if (nvme_mpath_clear_current_path(ns)) 4310 synchronize_srcu(&ns->head->srcu); 4311 4312 mutex_lock(&ns->ctrl->subsys->lock); 4313 list_del_rcu(&ns->siblings); 4314 if (list_empty(&ns->head->list)) { 4315 list_del_init(&ns->head->entry); 4316 last_path = true; 4317 } 4318 mutex_unlock(&ns->ctrl->subsys->lock); 4319 4320 /* guarantee not available in head->list */ 4321 synchronize_srcu(&ns->head->srcu); 4322 4323 if (!nvme_ns_head_multipath(ns->head)) 4324 nvme_cdev_del(&ns->cdev, &ns->cdev_device); 4325 del_gendisk(ns->disk); 4326 4327 down_write(&ns->ctrl->namespaces_rwsem); 4328 list_del_init(&ns->list); 4329 up_write(&ns->ctrl->namespaces_rwsem); 4330 4331 if (last_path) 4332 nvme_mpath_shutdown_disk(ns->head); 4333 nvme_put_ns(ns); 4334 } 4335 4336 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) 4337 { 4338 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); 4339 4340 if (ns) { 4341 nvme_ns_remove(ns); 4342 nvme_put_ns(ns); 4343 } 4344 } 4345 4346 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) 4347 { 4348 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 4349 4350 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { 4351 dev_err(ns->ctrl->device, 4352 "identifiers changed for nsid %d\n", ns->head->ns_id); 4353 goto out; 4354 } 4355 4356 ret = nvme_update_ns_info(ns, info); 4357 out: 4358 /* 4359 * Only remove the namespace if we got a fatal error back from the 4360 * device, otherwise ignore the error and just move on. 4361 * 4362 * TODO: we should probably schedule a delayed retry here. 4363 */ 4364 if (ret > 0 && (ret & NVME_SC_DNR)) 4365 nvme_ns_remove(ns); 4366 } 4367 4368 static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) 4369 { 4370 struct nvme_ns_info info = { .nsid = nsid }; 4371 struct nvme_ns *ns; 4372 4373 if (nvme_identify_ns_descs(ctrl, &info)) 4374 return; 4375 4376 if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) { 4377 dev_warn(ctrl->device, 4378 "command set not reported for nsid: %d\n", nsid); 4379 return; 4380 } 4381 4382 /* 4383 * If available try to use the Command Set Idependent Identify Namespace 4384 * data structure to find all the generic information that is needed to 4385 * set up a namespace. If not fall back to the legacy version. 4386 */ 4387 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || 4388 (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) { 4389 if (nvme_ns_info_from_id_cs_indep(ctrl, &info)) 4390 return; 4391 } else { 4392 if (nvme_ns_info_from_identify(ctrl, &info)) 4393 return; 4394 } 4395 4396 /* 4397 * Ignore the namespace if it is not ready. We will get an AEN once it 4398 * becomes ready and restart the scan. 4399 */ 4400 if (!info.is_ready) 4401 return; 4402 4403 ns = nvme_find_get_ns(ctrl, nsid); 4404 if (ns) { 4405 nvme_validate_ns(ns, &info); 4406 nvme_put_ns(ns); 4407 } else { 4408 nvme_alloc_ns(ctrl, &info); 4409 } 4410 } 4411 4412 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 4413 unsigned nsid) 4414 { 4415 struct nvme_ns *ns, *next; 4416 LIST_HEAD(rm_list); 4417 4418 down_write(&ctrl->namespaces_rwsem); 4419 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 4420 if (ns->head->ns_id > nsid) 4421 list_move_tail(&ns->list, &rm_list); 4422 } 4423 up_write(&ctrl->namespaces_rwsem); 4424 4425 list_for_each_entry_safe(ns, next, &rm_list, list) 4426 nvme_ns_remove(ns); 4427 4428 } 4429 4430 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) 4431 { 4432 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); 4433 __le32 *ns_list; 4434 u32 prev = 0; 4435 int ret = 0, i; 4436 4437 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 4438 if (!ns_list) 4439 return -ENOMEM; 4440 4441 for (;;) { 4442 struct nvme_command cmd = { 4443 .identify.opcode = nvme_admin_identify, 4444 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, 4445 .identify.nsid = cpu_to_le32(prev), 4446 }; 4447 4448 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, 4449 NVME_IDENTIFY_DATA_SIZE); 4450 if (ret) { 4451 dev_warn(ctrl->device, 4452 "Identify NS List failed (status=0x%x)\n", ret); 4453 goto free; 4454 } 4455 4456 for (i = 0; i < nr_entries; i++) { 4457 u32 nsid = le32_to_cpu(ns_list[i]); 4458 4459 if (!nsid) /* end of the list? */ 4460 goto out; 4461 nvme_scan_ns(ctrl, nsid); 4462 while (++prev < nsid) 4463 nvme_ns_remove_by_nsid(ctrl, prev); 4464 } 4465 } 4466 out: 4467 nvme_remove_invalid_namespaces(ctrl, prev); 4468 free: 4469 kfree(ns_list); 4470 return ret; 4471 } 4472 4473 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) 4474 { 4475 struct nvme_id_ctrl *id; 4476 u32 nn, i; 4477 4478 if (nvme_identify_ctrl(ctrl, &id)) 4479 return; 4480 nn = le32_to_cpu(id->nn); 4481 kfree(id); 4482 4483 for (i = 1; i <= nn; i++) 4484 nvme_scan_ns(ctrl, i); 4485 4486 nvme_remove_invalid_namespaces(ctrl, nn); 4487 } 4488 4489 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 4490 { 4491 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 4492 __le32 *log; 4493 int error; 4494 4495 log = kzalloc(log_size, GFP_KERNEL); 4496 if (!log) 4497 return; 4498 4499 /* 4500 * We need to read the log to clear the AEN, but we don't want to rely 4501 * on it for the changed namespace information as userspace could have 4502 * raced with us in reading the log page, which could cause us to miss 4503 * updates. 4504 */ 4505 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, 4506 NVME_CSI_NVM, log, log_size, 0); 4507 if (error) 4508 dev_warn(ctrl->device, 4509 "reading changed ns log failed: %d\n", error); 4510 4511 kfree(log); 4512 } 4513 4514 static void nvme_scan_work(struct work_struct *work) 4515 { 4516 struct nvme_ctrl *ctrl = 4517 container_of(work, struct nvme_ctrl, scan_work); 4518 int ret; 4519 4520 /* No tagset on a live ctrl means IO queues could not created */ 4521 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) 4522 return; 4523 4524 /* 4525 * Identify controller limits can change at controller reset due to 4526 * new firmware download, even though it is not common we cannot ignore 4527 * such scenario. Controller's non-mdts limits are reported in the unit 4528 * of logical blocks that is dependent on the format of attached 4529 * namespace. Hence re-read the limits at the time of ns allocation. 4530 */ 4531 ret = nvme_init_non_mdts_limits(ctrl); 4532 if (ret < 0) { 4533 dev_warn(ctrl->device, 4534 "reading non-mdts-limits failed: %d\n", ret); 4535 return; 4536 } 4537 4538 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 4539 dev_info(ctrl->device, "rescanning namespaces.\n"); 4540 nvme_clear_changed_ns_log(ctrl); 4541 } 4542 4543 mutex_lock(&ctrl->scan_lock); 4544 if (nvme_ctrl_limited_cns(ctrl)) { 4545 nvme_scan_ns_sequential(ctrl); 4546 } else { 4547 /* 4548 * Fall back to sequential scan if DNR is set to handle broken 4549 * devices which should support Identify NS List (as per the VS 4550 * they report) but don't actually support it. 4551 */ 4552 ret = nvme_scan_ns_list(ctrl); 4553 if (ret > 0 && ret & NVME_SC_DNR) 4554 nvme_scan_ns_sequential(ctrl); 4555 } 4556 mutex_unlock(&ctrl->scan_lock); 4557 } 4558 4559 /* 4560 * This function iterates the namespace list unlocked to allow recovery from 4561 * controller failure. It is up to the caller to ensure the namespace list is 4562 * not modified by scan work while this function is executing. 4563 */ 4564 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 4565 { 4566 struct nvme_ns *ns, *next; 4567 LIST_HEAD(ns_list); 4568 4569 /* 4570 * make sure to requeue I/O to all namespaces as these 4571 * might result from the scan itself and must complete 4572 * for the scan_work to make progress 4573 */ 4574 nvme_mpath_clear_ctrl_paths(ctrl); 4575 4576 /* prevent racing with ns scanning */ 4577 flush_work(&ctrl->scan_work); 4578 4579 /* 4580 * The dead states indicates the controller was not gracefully 4581 * disconnected. In that case, we won't be able to flush any data while 4582 * removing the namespaces' disks; fail all the queues now to avoid 4583 * potentially having to clean up the failed sync later. 4584 */ 4585 if (ctrl->state == NVME_CTRL_DEAD) { 4586 nvme_mark_namespaces_dead(ctrl); 4587 nvme_unquiesce_io_queues(ctrl); 4588 } 4589 4590 /* this is a no-op when called from the controller reset handler */ 4591 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); 4592 4593 down_write(&ctrl->namespaces_rwsem); 4594 list_splice_init(&ctrl->namespaces, &ns_list); 4595 up_write(&ctrl->namespaces_rwsem); 4596 4597 list_for_each_entry_safe(ns, next, &ns_list, list) 4598 nvme_ns_remove(ns); 4599 } 4600 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 4601 4602 static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env) 4603 { 4604 const struct nvme_ctrl *ctrl = 4605 container_of(dev, struct nvme_ctrl, ctrl_device); 4606 struct nvmf_ctrl_options *opts = ctrl->opts; 4607 int ret; 4608 4609 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); 4610 if (ret) 4611 return ret; 4612 4613 if (opts) { 4614 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); 4615 if (ret) 4616 return ret; 4617 4618 ret = add_uevent_var(env, "NVME_TRSVCID=%s", 4619 opts->trsvcid ?: "none"); 4620 if (ret) 4621 return ret; 4622 4623 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", 4624 opts->host_traddr ?: "none"); 4625 if (ret) 4626 return ret; 4627 4628 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", 4629 opts->host_iface ?: "none"); 4630 } 4631 return ret; 4632 } 4633 4634 static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata) 4635 { 4636 char *envp[2] = { envdata, NULL }; 4637 4638 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4639 } 4640 4641 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 4642 { 4643 char *envp[2] = { NULL, NULL }; 4644 u32 aen_result = ctrl->aen_result; 4645 4646 ctrl->aen_result = 0; 4647 if (!aen_result) 4648 return; 4649 4650 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 4651 if (!envp[0]) 4652 return; 4653 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4654 kfree(envp[0]); 4655 } 4656 4657 static void nvme_async_event_work(struct work_struct *work) 4658 { 4659 struct nvme_ctrl *ctrl = 4660 container_of(work, struct nvme_ctrl, async_event_work); 4661 4662 nvme_aen_uevent(ctrl); 4663 4664 /* 4665 * The transport drivers must guarantee AER submission here is safe by 4666 * flushing ctrl async_event_work after changing the controller state 4667 * from LIVE and before freeing the admin queue. 4668 */ 4669 if (ctrl->state == NVME_CTRL_LIVE) 4670 ctrl->ops->submit_async_event(ctrl); 4671 } 4672 4673 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 4674 { 4675 4676 u32 csts; 4677 4678 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 4679 return false; 4680 4681 if (csts == ~0) 4682 return false; 4683 4684 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 4685 } 4686 4687 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 4688 { 4689 struct nvme_fw_slot_info_log *log; 4690 4691 log = kmalloc(sizeof(*log), GFP_KERNEL); 4692 if (!log) 4693 return; 4694 4695 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, 4696 log, sizeof(*log), 0)) 4697 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 4698 kfree(log); 4699 } 4700 4701 static void nvme_fw_act_work(struct work_struct *work) 4702 { 4703 struct nvme_ctrl *ctrl = container_of(work, 4704 struct nvme_ctrl, fw_act_work); 4705 unsigned long fw_act_timeout; 4706 4707 if (ctrl->mtfa) 4708 fw_act_timeout = jiffies + 4709 msecs_to_jiffies(ctrl->mtfa * 100); 4710 else 4711 fw_act_timeout = jiffies + 4712 msecs_to_jiffies(admin_timeout * 1000); 4713 4714 nvme_quiesce_io_queues(ctrl); 4715 while (nvme_ctrl_pp_status(ctrl)) { 4716 if (time_after(jiffies, fw_act_timeout)) { 4717 dev_warn(ctrl->device, 4718 "Fw activation timeout, reset controller\n"); 4719 nvme_try_sched_reset(ctrl); 4720 return; 4721 } 4722 msleep(100); 4723 } 4724 4725 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) 4726 return; 4727 4728 nvme_unquiesce_io_queues(ctrl); 4729 /* read FW slot information to clear the AER */ 4730 nvme_get_fw_slot_info(ctrl); 4731 4732 queue_work(nvme_wq, &ctrl->async_event_work); 4733 } 4734 4735 static u32 nvme_aer_type(u32 result) 4736 { 4737 return result & 0x7; 4738 } 4739 4740 static u32 nvme_aer_subtype(u32 result) 4741 { 4742 return (result & 0xff00) >> 8; 4743 } 4744 4745 static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 4746 { 4747 u32 aer_notice_type = nvme_aer_subtype(result); 4748 bool requeue = true; 4749 4750 trace_nvme_async_event(ctrl, aer_notice_type); 4751 4752 switch (aer_notice_type) { 4753 case NVME_AER_NOTICE_NS_CHANGED: 4754 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 4755 nvme_queue_scan(ctrl); 4756 break; 4757 case NVME_AER_NOTICE_FW_ACT_STARTING: 4758 /* 4759 * We are (ab)using the RESETTING state to prevent subsequent 4760 * recovery actions from interfering with the controller's 4761 * firmware activation. 4762 */ 4763 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { 4764 nvme_auth_stop(ctrl); 4765 requeue = false; 4766 queue_work(nvme_wq, &ctrl->fw_act_work); 4767 } 4768 break; 4769 #ifdef CONFIG_NVME_MULTIPATH 4770 case NVME_AER_NOTICE_ANA: 4771 if (!ctrl->ana_log_buf) 4772 break; 4773 queue_work(nvme_wq, &ctrl->ana_work); 4774 break; 4775 #endif 4776 case NVME_AER_NOTICE_DISC_CHANGED: 4777 ctrl->aen_result = result; 4778 break; 4779 default: 4780 dev_warn(ctrl->device, "async event result %08x\n", result); 4781 } 4782 return requeue; 4783 } 4784 4785 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) 4786 { 4787 trace_nvme_async_event(ctrl, NVME_AER_ERROR); 4788 dev_warn(ctrl->device, "resetting controller due to AER\n"); 4789 nvme_reset_ctrl(ctrl); 4790 } 4791 4792 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 4793 volatile union nvme_result *res) 4794 { 4795 u32 result = le32_to_cpu(res->u32); 4796 u32 aer_type = nvme_aer_type(result); 4797 u32 aer_subtype = nvme_aer_subtype(result); 4798 bool requeue = true; 4799 4800 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 4801 return; 4802 4803 switch (aer_type) { 4804 case NVME_AER_NOTICE: 4805 requeue = nvme_handle_aen_notice(ctrl, result); 4806 break; 4807 case NVME_AER_ERROR: 4808 /* 4809 * For a persistent internal error, don't run async_event_work 4810 * to submit a new AER. The controller reset will do it. 4811 */ 4812 if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { 4813 nvme_handle_aer_persistent_error(ctrl); 4814 return; 4815 } 4816 fallthrough; 4817 case NVME_AER_SMART: 4818 case NVME_AER_CSS: 4819 case NVME_AER_VS: 4820 trace_nvme_async_event(ctrl, aer_type); 4821 ctrl->aen_result = result; 4822 break; 4823 default: 4824 break; 4825 } 4826 4827 if (requeue) 4828 queue_work(nvme_wq, &ctrl->async_event_work); 4829 } 4830 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 4831 4832 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4833 const struct blk_mq_ops *ops, unsigned int cmd_size) 4834 { 4835 int ret; 4836 4837 memset(set, 0, sizeof(*set)); 4838 set->ops = ops; 4839 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 4840 if (ctrl->ops->flags & NVME_F_FABRICS) 4841 set->reserved_tags = NVMF_RESERVED_TAGS; 4842 set->numa_node = ctrl->numa_node; 4843 set->flags = BLK_MQ_F_NO_SCHED; 4844 if (ctrl->ops->flags & NVME_F_BLOCKING) 4845 set->flags |= BLK_MQ_F_BLOCKING; 4846 set->cmd_size = cmd_size; 4847 set->driver_data = ctrl; 4848 set->nr_hw_queues = 1; 4849 set->timeout = NVME_ADMIN_TIMEOUT; 4850 ret = blk_mq_alloc_tag_set(set); 4851 if (ret) 4852 return ret; 4853 4854 ctrl->admin_q = blk_mq_init_queue(set); 4855 if (IS_ERR(ctrl->admin_q)) { 4856 ret = PTR_ERR(ctrl->admin_q); 4857 goto out_free_tagset; 4858 } 4859 4860 if (ctrl->ops->flags & NVME_F_FABRICS) { 4861 ctrl->fabrics_q = blk_mq_init_queue(set); 4862 if (IS_ERR(ctrl->fabrics_q)) { 4863 ret = PTR_ERR(ctrl->fabrics_q); 4864 goto out_cleanup_admin_q; 4865 } 4866 } 4867 4868 ctrl->admin_tagset = set; 4869 return 0; 4870 4871 out_cleanup_admin_q: 4872 blk_mq_destroy_queue(ctrl->admin_q); 4873 blk_put_queue(ctrl->admin_q); 4874 out_free_tagset: 4875 blk_mq_free_tag_set(ctrl->admin_tagset); 4876 return ret; 4877 } 4878 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); 4879 4880 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) 4881 { 4882 blk_mq_destroy_queue(ctrl->admin_q); 4883 blk_put_queue(ctrl->admin_q); 4884 if (ctrl->ops->flags & NVME_F_FABRICS) { 4885 blk_mq_destroy_queue(ctrl->fabrics_q); 4886 blk_put_queue(ctrl->fabrics_q); 4887 } 4888 blk_mq_free_tag_set(ctrl->admin_tagset); 4889 } 4890 EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set); 4891 4892 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4893 const struct blk_mq_ops *ops, unsigned int nr_maps, 4894 unsigned int cmd_size) 4895 { 4896 int ret; 4897 4898 memset(set, 0, sizeof(*set)); 4899 set->ops = ops; 4900 set->queue_depth = ctrl->sqsize + 1; 4901 /* 4902 * Some Apple controllers requires tags to be unique across admin and 4903 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue. 4904 */ 4905 if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) 4906 set->reserved_tags = NVME_AQ_DEPTH; 4907 else if (ctrl->ops->flags & NVME_F_FABRICS) 4908 set->reserved_tags = NVMF_RESERVED_TAGS; 4909 set->numa_node = ctrl->numa_node; 4910 set->flags = BLK_MQ_F_SHOULD_MERGE; 4911 if (ctrl->ops->flags & NVME_F_BLOCKING) 4912 set->flags |= BLK_MQ_F_BLOCKING; 4913 set->cmd_size = cmd_size, 4914 set->driver_data = ctrl; 4915 set->nr_hw_queues = ctrl->queue_count - 1; 4916 set->timeout = NVME_IO_TIMEOUT; 4917 set->nr_maps = nr_maps; 4918 ret = blk_mq_alloc_tag_set(set); 4919 if (ret) 4920 return ret; 4921 4922 if (ctrl->ops->flags & NVME_F_FABRICS) { 4923 ctrl->connect_q = blk_mq_init_queue(set); 4924 if (IS_ERR(ctrl->connect_q)) { 4925 ret = PTR_ERR(ctrl->connect_q); 4926 goto out_free_tag_set; 4927 } 4928 blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, 4929 ctrl->connect_q); 4930 } 4931 4932 ctrl->tagset = set; 4933 return 0; 4934 4935 out_free_tag_set: 4936 blk_mq_free_tag_set(set); 4937 return ret; 4938 } 4939 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set); 4940 4941 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl) 4942 { 4943 if (ctrl->ops->flags & NVME_F_FABRICS) { 4944 blk_mq_destroy_queue(ctrl->connect_q); 4945 blk_put_queue(ctrl->connect_q); 4946 } 4947 blk_mq_free_tag_set(ctrl->tagset); 4948 } 4949 EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set); 4950 4951 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 4952 { 4953 nvme_mpath_stop(ctrl); 4954 nvme_auth_stop(ctrl); 4955 nvme_stop_keep_alive(ctrl); 4956 nvme_stop_failfast_work(ctrl); 4957 flush_work(&ctrl->async_event_work); 4958 cancel_work_sync(&ctrl->fw_act_work); 4959 if (ctrl->ops->stop_ctrl) 4960 ctrl->ops->stop_ctrl(ctrl); 4961 } 4962 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 4963 4964 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 4965 { 4966 nvme_start_keep_alive(ctrl); 4967 4968 nvme_enable_aen(ctrl); 4969 4970 /* 4971 * persistent discovery controllers need to send indication to userspace 4972 * to re-read the discovery log page to learn about possible changes 4973 * that were missed. We identify persistent discovery controllers by 4974 * checking that they started once before, hence are reconnecting back. 4975 */ 4976 if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && 4977 nvme_discovery_ctrl(ctrl)) 4978 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); 4979 4980 if (ctrl->queue_count > 1) { 4981 nvme_queue_scan(ctrl); 4982 nvme_unquiesce_io_queues(ctrl); 4983 nvme_mpath_update(ctrl); 4984 } 4985 4986 nvme_change_uevent(ctrl, "NVME_EVENT=connected"); 4987 } 4988 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 4989 4990 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4991 { 4992 nvme_hwmon_exit(ctrl); 4993 nvme_fault_inject_fini(&ctrl->fault_inject); 4994 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4995 cdev_device_del(&ctrl->cdev, ctrl->device); 4996 nvme_put_ctrl(ctrl); 4997 } 4998 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 4999 5000 static void nvme_free_cels(struct nvme_ctrl *ctrl) 5001 { 5002 struct nvme_effects_log *cel; 5003 unsigned long i; 5004 5005 xa_for_each(&ctrl->cels, i, cel) { 5006 xa_erase(&ctrl->cels, i); 5007 kfree(cel); 5008 } 5009 5010 xa_destroy(&ctrl->cels); 5011 } 5012 5013 static void nvme_free_ctrl(struct device *dev) 5014 { 5015 struct nvme_ctrl *ctrl = 5016 container_of(dev, struct nvme_ctrl, ctrl_device); 5017 struct nvme_subsystem *subsys = ctrl->subsys; 5018 5019 if (!subsys || ctrl->instance != subsys->instance) 5020 ida_free(&nvme_instance_ida, ctrl->instance); 5021 5022 nvme_free_cels(ctrl); 5023 nvme_mpath_uninit(ctrl); 5024 nvme_auth_stop(ctrl); 5025 nvme_auth_free(ctrl); 5026 __free_page(ctrl->discard_page); 5027 free_opal_dev(ctrl->opal_dev); 5028 5029 if (subsys) { 5030 mutex_lock(&nvme_subsystems_lock); 5031 list_del(&ctrl->subsys_entry); 5032 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 5033 mutex_unlock(&nvme_subsystems_lock); 5034 } 5035 5036 ctrl->ops->free_ctrl(ctrl); 5037 5038 if (subsys) 5039 nvme_put_subsystem(subsys); 5040 } 5041 5042 /* 5043 * Initialize a NVMe controller structures. This needs to be called during 5044 * earliest initialization so that we have the initialized structured around 5045 * during probing. 5046 */ 5047 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 5048 const struct nvme_ctrl_ops *ops, unsigned long quirks) 5049 { 5050 int ret; 5051 5052 ctrl->state = NVME_CTRL_NEW; 5053 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 5054 spin_lock_init(&ctrl->lock); 5055 mutex_init(&ctrl->scan_lock); 5056 INIT_LIST_HEAD(&ctrl->namespaces); 5057 xa_init(&ctrl->cels); 5058 init_rwsem(&ctrl->namespaces_rwsem); 5059 ctrl->dev = dev; 5060 ctrl->ops = ops; 5061 ctrl->quirks = quirks; 5062 ctrl->numa_node = NUMA_NO_NODE; 5063 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 5064 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 5065 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 5066 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 5067 init_waitqueue_head(&ctrl->state_wq); 5068 5069 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 5070 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); 5071 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 5072 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 5073 5074 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > 5075 PAGE_SIZE); 5076 ctrl->discard_page = alloc_page(GFP_KERNEL); 5077 if (!ctrl->discard_page) { 5078 ret = -ENOMEM; 5079 goto out; 5080 } 5081 5082 ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL); 5083 if (ret < 0) 5084 goto out; 5085 ctrl->instance = ret; 5086 5087 device_initialize(&ctrl->ctrl_device); 5088 ctrl->device = &ctrl->ctrl_device; 5089 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), 5090 ctrl->instance); 5091 ctrl->device->class = nvme_class; 5092 ctrl->device->parent = ctrl->dev; 5093 if (ops->dev_attr_groups) 5094 ctrl->device->groups = ops->dev_attr_groups; 5095 else 5096 ctrl->device->groups = nvme_dev_attr_groups; 5097 ctrl->device->release = nvme_free_ctrl; 5098 dev_set_drvdata(ctrl->device, ctrl); 5099 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 5100 if (ret) 5101 goto out_release_instance; 5102 5103 nvme_get_ctrl(ctrl); 5104 cdev_init(&ctrl->cdev, &nvme_dev_fops); 5105 ctrl->cdev.owner = ops->module; 5106 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 5107 if (ret) 5108 goto out_free_name; 5109 5110 /* 5111 * Initialize latency tolerance controls. The sysfs files won't 5112 * be visible to userspace unless the device actually supports APST. 5113 */ 5114 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 5115 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 5116 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 5117 5118 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); 5119 nvme_mpath_init_ctrl(ctrl); 5120 ret = nvme_auth_init_ctrl(ctrl); 5121 if (ret) 5122 goto out_free_cdev; 5123 5124 return 0; 5125 out_free_cdev: 5126 cdev_device_del(&ctrl->cdev, ctrl->device); 5127 out_free_name: 5128 nvme_put_ctrl(ctrl); 5129 kfree_const(ctrl->device->kobj.name); 5130 out_release_instance: 5131 ida_free(&nvme_instance_ida, ctrl->instance); 5132 out: 5133 if (ctrl->discard_page) 5134 __free_page(ctrl->discard_page); 5135 return ret; 5136 } 5137 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 5138 5139 /* let I/O to all namespaces fail in preparation for surprise removal */ 5140 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) 5141 { 5142 struct nvme_ns *ns; 5143 5144 down_read(&ctrl->namespaces_rwsem); 5145 list_for_each_entry(ns, &ctrl->namespaces, list) 5146 blk_mark_disk_dead(ns->disk); 5147 up_read(&ctrl->namespaces_rwsem); 5148 } 5149 EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); 5150 5151 void nvme_unfreeze(struct nvme_ctrl *ctrl) 5152 { 5153 struct nvme_ns *ns; 5154 5155 down_read(&ctrl->namespaces_rwsem); 5156 list_for_each_entry(ns, &ctrl->namespaces, list) 5157 blk_mq_unfreeze_queue(ns->queue); 5158 up_read(&ctrl->namespaces_rwsem); 5159 } 5160 EXPORT_SYMBOL_GPL(nvme_unfreeze); 5161 5162 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 5163 { 5164 struct nvme_ns *ns; 5165 5166 down_read(&ctrl->namespaces_rwsem); 5167 list_for_each_entry(ns, &ctrl->namespaces, list) { 5168 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 5169 if (timeout <= 0) 5170 break; 5171 } 5172 up_read(&ctrl->namespaces_rwsem); 5173 return timeout; 5174 } 5175 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 5176 5177 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 5178 { 5179 struct nvme_ns *ns; 5180 5181 down_read(&ctrl->namespaces_rwsem); 5182 list_for_each_entry(ns, &ctrl->namespaces, list) 5183 blk_mq_freeze_queue_wait(ns->queue); 5184 up_read(&ctrl->namespaces_rwsem); 5185 } 5186 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 5187 5188 void nvme_start_freeze(struct nvme_ctrl *ctrl) 5189 { 5190 struct nvme_ns *ns; 5191 5192 down_read(&ctrl->namespaces_rwsem); 5193 list_for_each_entry(ns, &ctrl->namespaces, list) 5194 blk_freeze_queue_start(ns->queue); 5195 up_read(&ctrl->namespaces_rwsem); 5196 } 5197 EXPORT_SYMBOL_GPL(nvme_start_freeze); 5198 5199 void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl) 5200 { 5201 if (!ctrl->tagset) 5202 return; 5203 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 5204 blk_mq_quiesce_tagset(ctrl->tagset); 5205 else 5206 blk_mq_wait_quiesce_done(ctrl->tagset); 5207 } 5208 EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues); 5209 5210 void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl) 5211 { 5212 if (!ctrl->tagset) 5213 return; 5214 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 5215 blk_mq_unquiesce_tagset(ctrl->tagset); 5216 } 5217 EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues); 5218 5219 void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl) 5220 { 5221 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 5222 blk_mq_quiesce_queue(ctrl->admin_q); 5223 else 5224 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set); 5225 } 5226 EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue); 5227 5228 void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl) 5229 { 5230 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 5231 blk_mq_unquiesce_queue(ctrl->admin_q); 5232 } 5233 EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); 5234 5235 void nvme_sync_io_queues(struct nvme_ctrl *ctrl) 5236 { 5237 struct nvme_ns *ns; 5238 5239 down_read(&ctrl->namespaces_rwsem); 5240 list_for_each_entry(ns, &ctrl->namespaces, list) 5241 blk_sync_queue(ns->queue); 5242 up_read(&ctrl->namespaces_rwsem); 5243 } 5244 EXPORT_SYMBOL_GPL(nvme_sync_io_queues); 5245 5246 void nvme_sync_queues(struct nvme_ctrl *ctrl) 5247 { 5248 nvme_sync_io_queues(ctrl); 5249 if (ctrl->admin_q) 5250 blk_sync_queue(ctrl->admin_q); 5251 } 5252 EXPORT_SYMBOL_GPL(nvme_sync_queues); 5253 5254 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) 5255 { 5256 if (file->f_op != &nvme_dev_fops) 5257 return NULL; 5258 return file->private_data; 5259 } 5260 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); 5261 5262 /* 5263 * Check we didn't inadvertently grow the command structure sizes: 5264 */ 5265 static inline void _nvme_check_size(void) 5266 { 5267 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); 5268 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 5269 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); 5270 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 5271 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); 5272 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 5273 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); 5274 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); 5275 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 5276 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); 5277 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 5278 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 5279 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 5280 BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) != 5281 NVME_IDENTIFY_DATA_SIZE); 5282 BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); 5283 BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); 5284 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); 5285 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); 5286 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 5287 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 5288 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 5289 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); 5290 BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); 5291 } 5292 5293 5294 static int __init nvme_core_init(void) 5295 { 5296 int result = -ENOMEM; 5297 5298 _nvme_check_size(); 5299 5300 nvme_wq = alloc_workqueue("nvme-wq", 5301 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 5302 if (!nvme_wq) 5303 goto out; 5304 5305 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", 5306 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 5307 if (!nvme_reset_wq) 5308 goto destroy_wq; 5309 5310 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", 5311 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 5312 if (!nvme_delete_wq) 5313 goto destroy_reset_wq; 5314 5315 result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0, 5316 NVME_MINORS, "nvme"); 5317 if (result < 0) 5318 goto destroy_delete_wq; 5319 5320 nvme_class = class_create(THIS_MODULE, "nvme"); 5321 if (IS_ERR(nvme_class)) { 5322 result = PTR_ERR(nvme_class); 5323 goto unregister_chrdev; 5324 } 5325 nvme_class->dev_uevent = nvme_class_uevent; 5326 5327 nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem"); 5328 if (IS_ERR(nvme_subsys_class)) { 5329 result = PTR_ERR(nvme_subsys_class); 5330 goto destroy_class; 5331 } 5332 5333 result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, 5334 "nvme-generic"); 5335 if (result < 0) 5336 goto destroy_subsys_class; 5337 5338 nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic"); 5339 if (IS_ERR(nvme_ns_chr_class)) { 5340 result = PTR_ERR(nvme_ns_chr_class); 5341 goto unregister_generic_ns; 5342 } 5343 5344 result = nvme_init_auth(); 5345 if (result) 5346 goto destroy_ns_chr; 5347 return 0; 5348 5349 destroy_ns_chr: 5350 class_destroy(nvme_ns_chr_class); 5351 unregister_generic_ns: 5352 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 5353 destroy_subsys_class: 5354 class_destroy(nvme_subsys_class); 5355 destroy_class: 5356 class_destroy(nvme_class); 5357 unregister_chrdev: 5358 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 5359 destroy_delete_wq: 5360 destroy_workqueue(nvme_delete_wq); 5361 destroy_reset_wq: 5362 destroy_workqueue(nvme_reset_wq); 5363 destroy_wq: 5364 destroy_workqueue(nvme_wq); 5365 out: 5366 return result; 5367 } 5368 5369 static void __exit nvme_core_exit(void) 5370 { 5371 nvme_exit_auth(); 5372 class_destroy(nvme_ns_chr_class); 5373 class_destroy(nvme_subsys_class); 5374 class_destroy(nvme_class); 5375 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 5376 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 5377 destroy_workqueue(nvme_delete_wq); 5378 destroy_workqueue(nvme_reset_wq); 5379 destroy_workqueue(nvme_wq); 5380 ida_destroy(&nvme_ns_chr_minor_ida); 5381 ida_destroy(&nvme_instance_ida); 5382 } 5383 5384 MODULE_LICENSE("GPL"); 5385 MODULE_VERSION("1.0"); 5386 module_init(nvme_core_init); 5387 module_exit(nvme_core_exit); 5388