// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

struct nvme_ns_info {
	struct nvme_ns_ids ids;
	u32 nsid;
	__le32 anagrpid;
	bool is_shared;
	bool is_readonly;
	bool is_ready;
};

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");
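/*
 * Note on the timeout parameters above (illustrative, not functional code):
 * admin_timeout and nvme_io_timeout are consumed through the
 * NVME_ADMIN_TIMEOUT and NVME_IO_TIMEOUT macros in nvme.h, which scale the
 * parameter by HZ, e.g.
 *
 *	req->timeout = NVME_IO_TIMEOUT;		(= nvme_io_timeout * HZ)
 *
 * as nvme_init_request() below does, so changing the module parameter at
 * runtime only affects requests initialized after the change.
 */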
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive and periodic reconnects. nvme_reset_wq runs reset works,
 * which also flush works hosted on nvme_wq for serialization purposes.
 * nvme_delete_wq hosts controller deletion works, which flush reset
 * works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when the admin and I/O queues are both alive.
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}
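/*
 * Example of the workqueue serialization described above (illustrative): a
 * controller delete scheduled on nvme_delete_wq reaches nvme_do_delete_ctrl(),
 * which calls flush_work(&ctrl->reset_work), so a reset that is still running
 * on nvme_reset_wq finishes before controller teardown starts.
 */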
static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes, since
	 * ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

static void nvme_log_error(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	if (ns) {
		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
		       ns->disk ? ns->disk->disk_name : "?",
		       nvme_get_opcode_str(nr->cmd->common.opcode),
		       nr->cmd->common.opcode,
		       (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
		       (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
		       nvme_get_error_status_str(nr->status),
		       nr->status >> 8 & 7,	/* Status Code Type */
		       nr->status & 0xff,	/* Status Code */
		       nr->status & NVME_SC_MORE ? "MORE " : "",
		       nr->status & NVME_SC_DNR ? "DNR " : "");
		return;
	}

	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
			   dev_name(nr->ctrl->device),
			   nvme_get_admin_opcode_str(nr->cmd->common.opcode),
			   nr->cmd->common.opcode,
			   nvme_get_error_status_str(nr->status),
			   nr->status >> 8 & 7,	/* Status Code Type */
			   nr->status & 0xff,	/* Status Code */
			   nr->status & NVME_SC_MORE ? "MORE " : "",
			   nr->status & NVME_SC_DNR ? "DNR " : "");
}

enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
	AUTHENTICATE,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
		return AUTHENTICATE;

	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if (req->cmd_flags & REQ_NVME_MPATH) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}

static inline void nvme_end_req_zoned(struct request *req)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));
}

static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
		nvme_log_error(req);
	nvme_end_req_zoned(req);
	nvme_trace_bio_complete(req);
	blk_mq_end_request(req, status);
}

void nvme_complete_rq(struct request *req)
{
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (ctrl->kas)
		ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	case AUTHENTICATE:
#ifdef CONFIG_NVME_AUTH
		queue_work(nvme_wq, &ctrl->dhchap_auth_work);
		nvme_retry_req(req);
#else
		nvme_end_req(req);
#endif
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_complete_batch_req(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);
	nvme_end_req_zoned(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);

/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must propagate the return value.
435 */ 436 blk_status_t nvme_host_path_error(struct request *req) 437 { 438 nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; 439 blk_mq_set_request_complete(req); 440 nvme_complete_rq(req); 441 return BLK_STS_OK; 442 } 443 EXPORT_SYMBOL_GPL(nvme_host_path_error); 444 445 bool nvme_cancel_request(struct request *req, void *data) 446 { 447 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, 448 "Cancelling I/O %d", req->tag); 449 450 /* don't abort one completed request */ 451 if (blk_mq_request_completed(req)) 452 return true; 453 454 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; 455 nvme_req(req)->flags |= NVME_REQ_CANCELLED; 456 blk_mq_complete_request(req); 457 return true; 458 } 459 EXPORT_SYMBOL_GPL(nvme_cancel_request); 460 461 void nvme_cancel_tagset(struct nvme_ctrl *ctrl) 462 { 463 if (ctrl->tagset) { 464 blk_mq_tagset_busy_iter(ctrl->tagset, 465 nvme_cancel_request, ctrl); 466 blk_mq_tagset_wait_completed_request(ctrl->tagset); 467 } 468 } 469 EXPORT_SYMBOL_GPL(nvme_cancel_tagset); 470 471 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) 472 { 473 if (ctrl->admin_tagset) { 474 blk_mq_tagset_busy_iter(ctrl->admin_tagset, 475 nvme_cancel_request, ctrl); 476 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); 477 } 478 } 479 EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); 480 481 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 482 enum nvme_ctrl_state new_state) 483 { 484 enum nvme_ctrl_state old_state; 485 unsigned long flags; 486 bool changed = false; 487 488 spin_lock_irqsave(&ctrl->lock, flags); 489 490 old_state = ctrl->state; 491 switch (new_state) { 492 case NVME_CTRL_LIVE: 493 switch (old_state) { 494 case NVME_CTRL_NEW: 495 case NVME_CTRL_RESETTING: 496 case NVME_CTRL_CONNECTING: 497 changed = true; 498 fallthrough; 499 default: 500 break; 501 } 502 break; 503 case NVME_CTRL_RESETTING: 504 switch (old_state) { 505 case NVME_CTRL_NEW: 506 case NVME_CTRL_LIVE: 507 changed = true; 508 fallthrough; 509 default: 510 break; 511 } 512 break; 513 case NVME_CTRL_CONNECTING: 514 switch (old_state) { 515 case NVME_CTRL_NEW: 516 case NVME_CTRL_RESETTING: 517 changed = true; 518 fallthrough; 519 default: 520 break; 521 } 522 break; 523 case NVME_CTRL_DELETING: 524 switch (old_state) { 525 case NVME_CTRL_LIVE: 526 case NVME_CTRL_RESETTING: 527 case NVME_CTRL_CONNECTING: 528 changed = true; 529 fallthrough; 530 default: 531 break; 532 } 533 break; 534 case NVME_CTRL_DELETING_NOIO: 535 switch (old_state) { 536 case NVME_CTRL_DELETING: 537 case NVME_CTRL_DEAD: 538 changed = true; 539 fallthrough; 540 default: 541 break; 542 } 543 break; 544 case NVME_CTRL_DEAD: 545 switch (old_state) { 546 case NVME_CTRL_DELETING: 547 changed = true; 548 fallthrough; 549 default: 550 break; 551 } 552 break; 553 default: 554 break; 555 } 556 557 if (changed) { 558 ctrl->state = new_state; 559 wake_up_all(&ctrl->state_wq); 560 } 561 562 spin_unlock_irqrestore(&ctrl->lock, flags); 563 if (!changed) 564 return false; 565 566 if (ctrl->state == NVME_CTRL_LIVE) { 567 if (old_state == NVME_CTRL_CONNECTING) 568 nvme_stop_failfast_work(ctrl); 569 nvme_kick_requeue_lists(ctrl); 570 } else if (ctrl->state == NVME_CTRL_CONNECTING && 571 old_state == NVME_CTRL_RESETTING) { 572 nvme_start_failfast_work(ctrl); 573 } 574 return changed; 575 } 576 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); 577 578 /* 579 * Returns true for sink states that can't ever transition back to live. 
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_free(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = NVME_ADMIN_TIMEOUT;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_POLLED;
	nvme_clear_nvme_request(req);
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);
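/*
 * Minimal usage sketch for nvme_init_request() (illustrative only; it
 * mirrors what __nvme_submit_sync_cmd() further down actually does):
 *
 *	struct request *req;
 *
 *	req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	nvme_init_request(req, cmd);
 *	...execute the request and read nvme_req(req)->result...
 *	blk_mq_free_request(req);
 */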
/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered. However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DELETING &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * Currently we have a problem sending passthru commands on the
	 * admin_q if the controller is not LIVE, because we can't make sure
	 * that they are going out after the admin connect, controller enable
	 * and/or other commands in the initialization sequence. Until the
	 * controller is LIVE, fail with BLK_STS_RESOURCE so that they will
	 * be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
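/*
 * Sizing note for the discard path below (illustrative arithmetic): struct
 * nvme_dsm_range is 16 bytes, so the worst-case allocation for
 * NVME_DSM_MAX_RANGES (256) ranges is 4KiB. That is why a single
 * preallocated ctrl->discard_page can serve as the fallback buffer when the
 * kzalloc() fails; the BUILD_BUG_ON() in nvme_config_discard() enforces that
 * the range array never exceeds one page.
 */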
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail allocating our range, fall back to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
		struct request *req)
{
	u32 upper, lower;
	u64 ref48;

	/* both rw and write zeroes share the same reftag format */
	switch (ns->guard_type) {
	case NVME_NVM_NS_16B_GUARD:
		cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
		break;
	case NVME_NVM_NS_64B_GUARD:
		ref48 = ext_pi_ref_tag(req);
		lower = lower_32_bits(ref48);
		upper = upper_32_bits(ref48);

		cmnd->rw.reftag = cpu_to_le32(lower);
		cmnd->rw.cdw3 = cpu_to_le32(upper);
		break;
	default:
		break;
	}
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));

	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (nvme_ns_has_pi(ns)) {
		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			nvme_set_ref_tag(ns, cmnd, req);
			break;
		}
	}

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.flags = 0;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.cdw2 = 0;
	cmnd->rw.cdw3 = 0;
	cmnd->rw.metadata = 0;
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->rw.reftag = 0;
	cmnd->rw.apptag = 0;
	cmnd->rw.appmask = 0;

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 *
metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else 898 * we enable the PRACT bit for protection information or set the 899 * namespace capacity to zero to prevent any I/O. 900 */ 901 if (!blk_integrity_rq(req)) { 902 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) 903 return BLK_STS_NOTSUPP; 904 control |= NVME_RW_PRINFO_PRACT; 905 } 906 907 switch (ns->pi_type) { 908 case NVME_NS_DPS_PI_TYPE3: 909 control |= NVME_RW_PRINFO_PRCHK_GUARD; 910 break; 911 case NVME_NS_DPS_PI_TYPE1: 912 case NVME_NS_DPS_PI_TYPE2: 913 control |= NVME_RW_PRINFO_PRCHK_GUARD | 914 NVME_RW_PRINFO_PRCHK_REF; 915 if (op == nvme_cmd_zone_append) 916 control |= NVME_RW_APPEND_PIREMAP; 917 nvme_set_ref_tag(ns, cmnd, req); 918 break; 919 } 920 } 921 922 cmnd->rw.control = cpu_to_le16(control); 923 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 924 return 0; 925 } 926 927 void nvme_cleanup_cmd(struct request *req) 928 { 929 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { 930 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 931 932 if (req->special_vec.bv_page == ctrl->discard_page) 933 clear_bit_unlock(0, &ctrl->discard_page_busy); 934 else 935 kfree(bvec_virt(&req->special_vec)); 936 } 937 } 938 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); 939 940 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) 941 { 942 struct nvme_command *cmd = nvme_req(req)->cmd; 943 blk_status_t ret = BLK_STS_OK; 944 945 if (!(req->rq_flags & RQF_DONTPREP)) 946 nvme_clear_nvme_request(req); 947 948 switch (req_op(req)) { 949 case REQ_OP_DRV_IN: 950 case REQ_OP_DRV_OUT: 951 /* these are setup prior to execution in nvme_init_request() */ 952 break; 953 case REQ_OP_FLUSH: 954 nvme_setup_flush(ns, cmd); 955 break; 956 case REQ_OP_ZONE_RESET_ALL: 957 case REQ_OP_ZONE_RESET: 958 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET); 959 break; 960 case REQ_OP_ZONE_OPEN: 961 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN); 962 break; 963 case REQ_OP_ZONE_CLOSE: 964 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE); 965 break; 966 case REQ_OP_ZONE_FINISH: 967 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH); 968 break; 969 case REQ_OP_WRITE_ZEROES: 970 ret = nvme_setup_write_zeroes(ns, req, cmd); 971 break; 972 case REQ_OP_DISCARD: 973 ret = nvme_setup_discard(ns, req, cmd); 974 break; 975 case REQ_OP_READ: 976 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read); 977 break; 978 case REQ_OP_WRITE: 979 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write); 980 break; 981 case REQ_OP_ZONE_APPEND: 982 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append); 983 break; 984 default: 985 WARN_ON_ONCE(1); 986 return BLK_STS_IOERR; 987 } 988 989 cmd->common.command_id = nvme_cid(req); 990 trace_nvme_setup_cmd(req, cmd); 991 return ret; 992 } 993 EXPORT_SYMBOL_GPL(nvme_setup_cmd); 994 995 /* 996 * Return values: 997 * 0: success 998 * >0: nvme controller's cqe status response 999 * <0: kernel error in lieu of controller response 1000 */ 1001 static int nvme_execute_rq(struct request *rq, bool at_head) 1002 { 1003 blk_status_t status; 1004 1005 status = blk_execute_rq(rq, at_head); 1006 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) 1007 return -EINTR; 1008 if (nvme_req(rq)->status) 1009 return nvme_req(rq)->status; 1010 return blk_status_to_errno(status); 1011 } 1012 1013 /* 1014 * Returns 0 on success. 
If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, int at_head, blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	if (qid == NVME_QID_ANY)
		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	else
		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
						qid - 1);

	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, cmd);

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	req->rq_flags |= RQF_QUIET;
	ret = nvme_execute_rq(req, at_head);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unhandled effects:%08x\n",
				opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
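/*
 * Worked example (illustrative): a passthrough Format NVM admin command
 * reports at least NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
 * NVME_CMD_EFFECTS_CSE_MASK via nvme_known_admin_effects(). The CSE bits
 * make nvme_passthru_start() below freeze all I/O queues for the duration
 * of the command, and the NCC bit makes nvme_passthru_end() rescan the
 * namespaces once the command has completed.
 */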
static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
		       struct nvme_command *cmd, int status)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_ctrl_finish(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * The keep-alive command interval on the host should
			 * be updated when KATO is modified by a Set Features
			 * command.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);

int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;

	*effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	return nvme_execute_rq(rq, false);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);

/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 * The host should send Keep Alive commands at half of the Keep Alive Timeout
 * accounting for transport roundtrip times [..].
1169 */ 1170 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) 1171 { 1172 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2); 1173 } 1174 1175 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, 1176 blk_status_t status) 1177 { 1178 struct nvme_ctrl *ctrl = rq->end_io_data; 1179 unsigned long flags; 1180 bool startka = false; 1181 1182 blk_mq_free_request(rq); 1183 1184 if (status) { 1185 dev_err(ctrl->device, 1186 "failed nvme_keep_alive_end_io error=%d\n", 1187 status); 1188 return RQ_END_IO_NONE; 1189 } 1190 1191 ctrl->comp_seen = false; 1192 spin_lock_irqsave(&ctrl->lock, flags); 1193 if (ctrl->state == NVME_CTRL_LIVE || 1194 ctrl->state == NVME_CTRL_CONNECTING) 1195 startka = true; 1196 spin_unlock_irqrestore(&ctrl->lock, flags); 1197 if (startka) 1198 nvme_queue_keep_alive_work(ctrl); 1199 return RQ_END_IO_NONE; 1200 } 1201 1202 static void nvme_keep_alive_work(struct work_struct *work) 1203 { 1204 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 1205 struct nvme_ctrl, ka_work); 1206 bool comp_seen = ctrl->comp_seen; 1207 struct request *rq; 1208 1209 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { 1210 dev_dbg(ctrl->device, 1211 "reschedule traffic based keep-alive timer\n"); 1212 ctrl->comp_seen = false; 1213 nvme_queue_keep_alive_work(ctrl); 1214 return; 1215 } 1216 1217 rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd), 1218 BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); 1219 if (IS_ERR(rq)) { 1220 /* allocation failure, reset the controller */ 1221 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); 1222 nvme_reset_ctrl(ctrl); 1223 return; 1224 } 1225 nvme_init_request(rq, &ctrl->ka_cmd); 1226 1227 rq->timeout = ctrl->kato * HZ; 1228 rq->end_io = nvme_keep_alive_end_io; 1229 rq->end_io_data = ctrl; 1230 rq->rq_flags |= RQF_QUIET; 1231 blk_execute_rq_nowait(rq, false); 1232 } 1233 1234 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) 1235 { 1236 if (unlikely(ctrl->kato == 0)) 1237 return; 1238 1239 nvme_queue_keep_alive_work(ctrl); 1240 } 1241 1242 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) 1243 { 1244 if (unlikely(ctrl->kato == 0)) 1245 return; 1246 1247 cancel_delayed_work_sync(&ctrl->ka_work); 1248 } 1249 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); 1250 1251 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, 1252 struct nvme_command *cmd) 1253 { 1254 unsigned int new_kato = 1255 DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000); 1256 1257 dev_info(ctrl->device, 1258 "keep alive interval updated from %u ms to %u ms\n", 1259 ctrl->kato * 1000 / 2, new_kato * 1000 / 2); 1260 1261 nvme_stop_keep_alive(ctrl); 1262 ctrl->kato = new_kato; 1263 nvme_start_keep_alive(ctrl); 1264 } 1265 1266 /* 1267 * In NVMe 1.0 the CNS field was just a binary controller or namespace 1268 * flag, thus sending any new CNS opcodes has a big chance of not working. 1269 * Qemu unfortunately had that bug after reporting a 1.1 version compliance 1270 * (but not for any later version). 
1271 */ 1272 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) 1273 { 1274 if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) 1275 return ctrl->vs < NVME_VS(1, 2, 0); 1276 return ctrl->vs < NVME_VS(1, 1, 0); 1277 } 1278 1279 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) 1280 { 1281 struct nvme_command c = { }; 1282 int error; 1283 1284 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1285 c.identify.opcode = nvme_admin_identify; 1286 c.identify.cns = NVME_ID_CNS_CTRL; 1287 1288 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); 1289 if (!*id) 1290 return -ENOMEM; 1291 1292 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, 1293 sizeof(struct nvme_id_ctrl)); 1294 if (error) 1295 kfree(*id); 1296 return error; 1297 } 1298 1299 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, 1300 struct nvme_ns_id_desc *cur, bool *csi_seen) 1301 { 1302 const char *warn_str = "ctrl returned bogus length:"; 1303 void *data = cur; 1304 1305 switch (cur->nidt) { 1306 case NVME_NIDT_EUI64: 1307 if (cur->nidl != NVME_NIDT_EUI64_LEN) { 1308 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n", 1309 warn_str, cur->nidl); 1310 return -1; 1311 } 1312 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) 1313 return NVME_NIDT_EUI64_LEN; 1314 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); 1315 return NVME_NIDT_EUI64_LEN; 1316 case NVME_NIDT_NGUID: 1317 if (cur->nidl != NVME_NIDT_NGUID_LEN) { 1318 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n", 1319 warn_str, cur->nidl); 1320 return -1; 1321 } 1322 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) 1323 return NVME_NIDT_NGUID_LEN; 1324 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); 1325 return NVME_NIDT_NGUID_LEN; 1326 case NVME_NIDT_UUID: 1327 if (cur->nidl != NVME_NIDT_UUID_LEN) { 1328 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n", 1329 warn_str, cur->nidl); 1330 return -1; 1331 } 1332 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) 1333 return NVME_NIDT_UUID_LEN; 1334 uuid_copy(&ids->uuid, data + sizeof(*cur)); 1335 return NVME_NIDT_UUID_LEN; 1336 case NVME_NIDT_CSI: 1337 if (cur->nidl != NVME_NIDT_CSI_LEN) { 1338 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n", 1339 warn_str, cur->nidl); 1340 return -1; 1341 } 1342 memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN); 1343 *csi_seen = true; 1344 return NVME_NIDT_CSI_LEN; 1345 default: 1346 /* Skip unknown types */ 1347 return cur->nidl; 1348 } 1349 } 1350 1351 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, 1352 struct nvme_ns_info *info) 1353 { 1354 struct nvme_command c = { }; 1355 bool csi_seen = false; 1356 int status, pos, len; 1357 void *data; 1358 1359 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl)) 1360 return 0; 1361 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) 1362 return 0; 1363 1364 c.identify.opcode = nvme_admin_identify; 1365 c.identify.nsid = cpu_to_le32(info->nsid); 1366 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; 1367 1368 data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 1369 if (!data) 1370 return -ENOMEM; 1371 1372 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, 1373 NVME_IDENTIFY_DATA_SIZE); 1374 if (status) { 1375 dev_warn(ctrl->device, 1376 "Identify Descriptors failed (nsid=%u, status=0x%x)\n", 1377 info->nsid, status); 1378 goto free_data; 1379 } 1380 1381 for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { 1382 struct nvme_ns_id_desc *cur = data + pos; 1383 1384 if (cur->nidl == 0) 1385 break; 1386 1387 len = nvme_process_ns_desc(ctrl, 
&info->ids, cur, &csi_seen); 1388 if (len < 0) 1389 break; 1390 1391 len += sizeof(*cur); 1392 } 1393 1394 if (nvme_multi_css(ctrl) && !csi_seen) { 1395 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n", 1396 info->nsid); 1397 status = -EINVAL; 1398 } 1399 1400 free_data: 1401 kfree(data); 1402 return status; 1403 } 1404 1405 static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, 1406 struct nvme_id_ns **id) 1407 { 1408 struct nvme_command c = { }; 1409 int error; 1410 1411 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1412 c.identify.opcode = nvme_admin_identify; 1413 c.identify.nsid = cpu_to_le32(nsid); 1414 c.identify.cns = NVME_ID_CNS_NS; 1415 1416 *id = kmalloc(sizeof(**id), GFP_KERNEL); 1417 if (!*id) 1418 return -ENOMEM; 1419 1420 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); 1421 if (error) { 1422 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); 1423 goto out_free_id; 1424 } 1425 1426 error = NVME_SC_INVALID_NS | NVME_SC_DNR; 1427 if ((*id)->ncap == 0) /* namespace not allocated or attached */ 1428 goto out_free_id; 1429 return 0; 1430 1431 out_free_id: 1432 kfree(*id); 1433 return error; 1434 } 1435 1436 static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, 1437 struct nvme_ns_info *info) 1438 { 1439 struct nvme_ns_ids *ids = &info->ids; 1440 struct nvme_id_ns *id; 1441 int ret; 1442 1443 ret = nvme_identify_ns(ctrl, info->nsid, &id); 1444 if (ret) 1445 return ret; 1446 info->anagrpid = id->anagrpid; 1447 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; 1448 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; 1449 info->is_ready = true; 1450 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { 1451 dev_info(ctrl->device, 1452 "Ignoring bogus Namespace Identifiers\n"); 1453 } else { 1454 if (ctrl->vs >= NVME_VS(1, 1, 0) && 1455 !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 1456 memcpy(ids->eui64, id->eui64, sizeof(ids->eui64)); 1457 if (ctrl->vs >= NVME_VS(1, 2, 0) && 1458 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 1459 memcpy(ids->nguid, id->nguid, sizeof(ids->nguid)); 1460 } 1461 kfree(id); 1462 return 0; 1463 } 1464 1465 static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, 1466 struct nvme_ns_info *info) 1467 { 1468 struct nvme_id_ns_cs_indep *id; 1469 struct nvme_command c = { 1470 .identify.opcode = nvme_admin_identify, 1471 .identify.nsid = cpu_to_le32(info->nsid), 1472 .identify.cns = NVME_ID_CNS_NS_CS_INDEP, 1473 }; 1474 int ret; 1475 1476 id = kmalloc(sizeof(*id), GFP_KERNEL); 1477 if (!id) 1478 return -ENOMEM; 1479 1480 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 1481 if (!ret) { 1482 info->anagrpid = id->anagrpid; 1483 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; 1484 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; 1485 info->is_ready = id->nstat & NVME_NSTAT_NRDY; 1486 } 1487 kfree(id); 1488 return ret; 1489 } 1490 1491 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, 1492 unsigned int dword11, void *buffer, size_t buflen, u32 *result) 1493 { 1494 union nvme_result res = { 0 }; 1495 struct nvme_command c = { }; 1496 int ret; 1497 1498 c.features.opcode = op; 1499 c.features.fid = cpu_to_le32(fid); 1500 c.features.dword11 = cpu_to_le32(dword11); 1501 1502 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, 1503 buffer, buflen, NVME_QID_ANY, 0, 0); 1504 if (ret >= 0 && result) 1505 *result = le32_to_cpu(res.u32); 1506 return ret; 1507 } 1508 1509 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, 1510 
unsigned int dword11, void *buffer, size_t buflen, 1511 u32 *result) 1512 { 1513 return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, 1514 buflen, result); 1515 } 1516 EXPORT_SYMBOL_GPL(nvme_set_features); 1517 1518 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, 1519 unsigned int dword11, void *buffer, size_t buflen, 1520 u32 *result) 1521 { 1522 return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, 1523 buflen, result); 1524 } 1525 EXPORT_SYMBOL_GPL(nvme_get_features); 1526 1527 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) 1528 { 1529 u32 q_count = (*count - 1) | ((*count - 1) << 16); 1530 u32 result; 1531 int status, nr_io_queues; 1532 1533 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, 1534 &result); 1535 if (status < 0) 1536 return status; 1537 1538 /* 1539 * Degraded controllers might return an error when setting the queue 1540 * count. We still want to be able to bring them online and offer 1541 * access to the admin queue, as that might be only way to fix them up. 1542 */ 1543 if (status > 0) { 1544 dev_err(ctrl->device, "Could not set queue count (%d)\n", status); 1545 *count = 0; 1546 } else { 1547 nr_io_queues = min(result & 0xffff, result >> 16) + 1; 1548 *count = min(*count, nr_io_queues); 1549 } 1550 1551 return 0; 1552 } 1553 EXPORT_SYMBOL_GPL(nvme_set_queue_count); 1554 1555 #define NVME_AEN_SUPPORTED \ 1556 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \ 1557 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE) 1558 1559 static void nvme_enable_aen(struct nvme_ctrl *ctrl) 1560 { 1561 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; 1562 int status; 1563 1564 if (!supported_aens) 1565 return; 1566 1567 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, 1568 NULL, 0, &result); 1569 if (status) 1570 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", 1571 supported_aens); 1572 1573 queue_work(nvme_wq, &ctrl->async_event_work); 1574 } 1575 1576 static int nvme_ns_open(struct nvme_ns *ns) 1577 { 1578 1579 /* should never be called due to GENHD_FL_HIDDEN */ 1580 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head))) 1581 goto fail; 1582 if (!nvme_get_ns(ns)) 1583 goto fail; 1584 if (!try_module_get(ns->ctrl->ops->module)) 1585 goto fail_put_ns; 1586 1587 return 0; 1588 1589 fail_put_ns: 1590 nvme_put_ns(ns); 1591 fail: 1592 return -ENXIO; 1593 } 1594 1595 static void nvme_ns_release(struct nvme_ns *ns) 1596 { 1597 1598 module_put(ns->ctrl->ops->module); 1599 nvme_put_ns(ns); 1600 } 1601 1602 static int nvme_open(struct block_device *bdev, fmode_t mode) 1603 { 1604 return nvme_ns_open(bdev->bd_disk->private_data); 1605 } 1606 1607 static void nvme_release(struct gendisk *disk, fmode_t mode) 1608 { 1609 nvme_ns_release(disk->private_data); 1610 } 1611 1612 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) 1613 { 1614 /* some standard values */ 1615 geo->heads = 1 << 6; 1616 geo->sectors = 1 << 5; 1617 geo->cylinders = get_capacity(bdev->bd_disk) >> 11; 1618 return 0; 1619 } 1620 1621 #ifdef CONFIG_BLK_DEV_INTEGRITY 1622 static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, 1623 u32 max_integrity_segments) 1624 { 1625 struct blk_integrity integrity = { }; 1626 1627 switch (ns->pi_type) { 1628 case NVME_NS_DPS_PI_TYPE3: 1629 switch (ns->guard_type) { 1630 case NVME_NVM_NS_16B_GUARD: 1631 integrity.profile = &t10_pi_type3_crc; 1632 integrity.tag_size = sizeof(u16) + sizeof(u32); 1633 integrity.flags |= 
BLK_INTEGRITY_DEVICE_CAPABLE; 1634 break; 1635 case NVME_NVM_NS_64B_GUARD: 1636 integrity.profile = &ext_pi_type3_crc64; 1637 integrity.tag_size = sizeof(u16) + 6; 1638 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1639 break; 1640 default: 1641 integrity.profile = NULL; 1642 break; 1643 } 1644 break; 1645 case NVME_NS_DPS_PI_TYPE1: 1646 case NVME_NS_DPS_PI_TYPE2: 1647 switch (ns->guard_type) { 1648 case NVME_NVM_NS_16B_GUARD: 1649 integrity.profile = &t10_pi_type1_crc; 1650 integrity.tag_size = sizeof(u16); 1651 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1652 break; 1653 case NVME_NVM_NS_64B_GUARD: 1654 integrity.profile = &ext_pi_type1_crc64; 1655 integrity.tag_size = sizeof(u16); 1656 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1657 break; 1658 default: 1659 integrity.profile = NULL; 1660 break; 1661 } 1662 break; 1663 default: 1664 integrity.profile = NULL; 1665 break; 1666 } 1667 1668 integrity.tuple_size = ns->ms; 1669 blk_integrity_register(disk, &integrity); 1670 blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); 1671 } 1672 #else 1673 static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, 1674 u32 max_integrity_segments) 1675 { 1676 } 1677 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 1678 1679 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) 1680 { 1681 struct nvme_ctrl *ctrl = ns->ctrl; 1682 struct request_queue *queue = disk->queue; 1683 u32 size = queue_logical_block_size(queue); 1684 1685 if (ctrl->max_discard_sectors == 0) { 1686 blk_queue_max_discard_sectors(queue, 0); 1687 return; 1688 } 1689 1690 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < 1691 NVME_DSM_MAX_RANGES); 1692 1693 queue->limits.discard_granularity = size; 1694 1695 /* If discard is already enabled, don't reset queue limits */ 1696 if (queue->limits.max_discard_sectors) 1697 return; 1698 1699 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX)) 1700 ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl); 1701 1702 blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors); 1703 blk_queue_max_discard_segments(queue, ctrl->max_discard_segments); 1704 1705 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 1706 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); 1707 } 1708 1709 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) 1710 { 1711 return uuid_equal(&a->uuid, &b->uuid) && 1712 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && 1713 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 && 1714 a->csi == b->csi; 1715 } 1716 1717 static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) 1718 { 1719 bool first = id->dps & NVME_NS_DPS_PI_FIRST; 1720 unsigned lbaf = nvme_lbaf_index(id->flbas); 1721 struct nvme_ctrl *ctrl = ns->ctrl; 1722 struct nvme_command c = { }; 1723 struct nvme_id_ns_nvm *nvm; 1724 int ret = 0; 1725 u32 elbaf; 1726 1727 ns->pi_size = 0; 1728 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); 1729 if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { 1730 ns->pi_size = sizeof(struct t10_pi_tuple); 1731 ns->guard_type = NVME_NVM_NS_16B_GUARD; 1732 goto set_pi; 1733 } 1734 1735 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); 1736 if (!nvm) 1737 return -ENOMEM; 1738 1739 c.identify.opcode = nvme_admin_identify; 1740 c.identify.nsid = cpu_to_le32(ns->head->ns_id); 1741 c.identify.cns = NVME_ID_CNS_CS_NS; 1742 c.identify.csi = NVME_CSI_NVM; 1743 1744 ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm)); 1745 if (ret) 1746 goto free_data; 1747 1748 elbaf = 
le32_to_cpu(nvm->elbaf[lbaf]); 1749 1750 /* no support for storage tag formats right now */ 1751 if (nvme_elbaf_sts(elbaf)) 1752 goto free_data; 1753 1754 ns->guard_type = nvme_elbaf_guard_type(elbaf); 1755 switch (ns->guard_type) { 1756 case NVME_NVM_NS_64B_GUARD: 1757 ns->pi_size = sizeof(struct crc64_pi_tuple); 1758 break; 1759 case NVME_NVM_NS_16B_GUARD: 1760 ns->pi_size = sizeof(struct t10_pi_tuple); 1761 break; 1762 default: 1763 break; 1764 } 1765 1766 free_data: 1767 kfree(nvm); 1768 set_pi: 1769 if (ns->pi_size && (first || ns->ms == ns->pi_size)) 1770 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1771 else 1772 ns->pi_type = 0; 1773 1774 return ret; 1775 } 1776 1777 static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) 1778 { 1779 struct nvme_ctrl *ctrl = ns->ctrl; 1780 1781 if (nvme_init_ms(ns, id)) 1782 return; 1783 1784 ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1785 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1786 return; 1787 1788 if (ctrl->ops->flags & NVME_F_FABRICS) { 1789 /* 1790 * The NVMe over Fabrics specification only supports metadata as 1791 * part of the extended data LBA. We rely on HCA/HBA support to 1792 * remap the separate metadata buffer from the block layer. 1793 */ 1794 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) 1795 return; 1796 1797 ns->features |= NVME_NS_EXT_LBAS; 1798 1799 /* 1800 * The current fabrics transport drivers support namespace 1801 * metadata formats only if nvme_ns_has_pi() returns true. 1802 * Suppress support for all other formats so the namespace will 1803 * have a 0 capacity and not be usable through the block stack. 1804 * 1805 * Note, this check will need to be modified if any drivers 1806 * gain the ability to use other metadata formats. 1807 */ 1808 if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns)) 1809 ns->features |= NVME_NS_METADATA_SUPPORTED; 1810 } else { 1811 /* 1812 * For PCIe controllers, we can't easily remap the separate 1813 * metadata buffer from the block layer and thus require a 1814 * separate metadata buffer for block layer metadata/PI support. 1815 * We allow extended LBAs for the passthrough interface, though. 1816 */ 1817 if (id->flbas & NVME_NS_FLBAS_META_EXT) 1818 ns->features |= NVME_NS_EXT_LBAS; 1819 else 1820 ns->features |= NVME_NS_METADATA_SUPPORTED; 1821 } 1822 } 1823 1824 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, 1825 struct request_queue *q) 1826 { 1827 bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; 1828 1829 if (ctrl->max_hw_sectors) { 1830 u32 max_segments = 1831 (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; 1832 1833 max_segments = min_not_zero(max_segments, ctrl->max_segments); 1834 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 1835 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 1836 } 1837 blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); 1838 blk_queue_dma_alignment(q, 3); 1839 blk_queue_write_cache(q, vwc, vwc); 1840 } 1841 1842 static void nvme_update_disk_info(struct gendisk *disk, 1843 struct nvme_ns *ns, struct nvme_id_ns *id) 1844 { 1845 sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); 1846 unsigned short bs = 1 << ns->lba_shift; 1847 u32 atomic_bs, phys_bs, io_opt = 0; 1848 1849 /* 1850 * The block layer can't support LBA sizes larger than the page size 1851 * yet, so catch this early and don't allow block I/O. 
1852 */ 1853 if (ns->lba_shift > PAGE_SHIFT) { 1854 capacity = 0; 1855 bs = (1 << 9); 1856 } 1857 1858 blk_integrity_unregister(disk); 1859 1860 atomic_bs = phys_bs = bs; 1861 if (id->nabo == 0) { 1862 /* 1863 * Bit 1 indicates whether NAWUPF is defined for this namespace 1864 * and whether it should be used instead of AWUPF. If NAWUPF == 1865 * 0 then AWUPF must be used instead. 1866 */ 1867 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) 1868 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; 1869 else 1870 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; 1871 } 1872 1873 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { 1874 /* NPWG = Namespace Preferred Write Granularity */ 1875 phys_bs = bs * (1 + le16_to_cpu(id->npwg)); 1876 /* NOWS = Namespace Optimal Write Size */ 1877 io_opt = bs * (1 + le16_to_cpu(id->nows)); 1878 } 1879 1880 blk_queue_logical_block_size(disk->queue, bs); 1881 /* 1882 * Linux filesystems assume writing a single physical block is 1883 * an atomic operation. Hence limit the physical block size to the 1884 * value of the Atomic Write Unit Power Fail parameter. 1885 */ 1886 blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); 1887 blk_queue_io_min(disk->queue, phys_bs); 1888 blk_queue_io_opt(disk->queue, io_opt); 1889 1890 /* 1891 * Register a metadata profile for PI, or the plain non-integrity NVMe 1892 * metadata masquerading as Type 0 if supported, otherwise reject block 1893 * I/O to namespaces with metadata except when the namespace supports 1894 * PI, as it can strip/insert in that case. 1895 */ 1896 if (ns->ms) { 1897 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && 1898 (ns->features & NVME_NS_METADATA_SUPPORTED)) 1899 nvme_init_integrity(disk, ns, 1900 ns->ctrl->max_integrity_segments); 1901 else if (!nvme_ns_has_pi(ns)) 1902 capacity = 0; 1903 } 1904 1905 set_capacity_and_notify(disk, capacity); 1906 1907 nvme_config_discard(disk, ns); 1908 blk_queue_max_write_zeroes_sectors(disk->queue, 1909 ns->ctrl->max_zeroes_sectors); 1910 } 1911 1912 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) 1913 { 1914 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags); 1915 } 1916 1917 static inline bool nvme_first_scan(struct gendisk *disk) 1918 { 1919 /* nvme_alloc_ns() scans the disk prior to adding it */ 1920 return !disk_live(disk); 1921 } 1922 1923 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) 1924 { 1925 struct nvme_ctrl *ctrl = ns->ctrl; 1926 u32 iob; 1927 1928 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && 1929 is_power_of_2(ctrl->max_hw_sectors)) 1930 iob = ctrl->max_hw_sectors; 1931 else 1932 iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); 1933 1934 if (!iob) 1935 return; 1936 1937 if (!is_power_of_2(iob)) { 1938 if (nvme_first_scan(ns->disk)) 1939 pr_warn("%s: ignoring unaligned IO boundary:%u\n", 1940 ns->disk->disk_name, iob); 1941 return; 1942 } 1943 1944 if (blk_queue_is_zoned(ns->disk->queue)) { 1945 if (nvme_first_scan(ns->disk)) 1946 pr_warn("%s: ignoring zoned namespace IO boundary\n", 1947 ns->disk->disk_name); 1948 return; 1949 } 1950 1951 blk_queue_chunk_sectors(ns->queue, iob); 1952 } 1953 1954 static int nvme_update_ns_info_generic(struct nvme_ns *ns, 1955 struct nvme_ns_info *info) 1956 { 1957 blk_mq_freeze_queue(ns->disk->queue); 1958 nvme_set_queue_limits(ns->ctrl, ns->queue); 1959 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 1960 blk_mq_unfreeze_queue(ns->disk->queue); 1961 1962 if (nvme_ns_head_multipath(ns->head)) { 1963 
blk_mq_freeze_queue(ns->head->disk->queue); 1964 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 1965 nvme_mpath_revalidate_paths(ns); 1966 blk_stack_limits(&ns->head->disk->queue->limits, 1967 &ns->queue->limits, 0); 1968 ns->head->disk->flags |= GENHD_FL_HIDDEN; 1969 blk_mq_unfreeze_queue(ns->head->disk->queue); 1970 } 1971 1972 /* Hide the block-interface for these devices */ 1973 ns->disk->flags |= GENHD_FL_HIDDEN; 1974 set_bit(NVME_NS_READY, &ns->flags); 1975 1976 return 0; 1977 } 1978 1979 static int nvme_update_ns_info_block(struct nvme_ns *ns, 1980 struct nvme_ns_info *info) 1981 { 1982 struct nvme_id_ns *id; 1983 unsigned lbaf; 1984 int ret; 1985 1986 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id); 1987 if (ret) 1988 return ret; 1989 1990 blk_mq_freeze_queue(ns->disk->queue); 1991 lbaf = nvme_lbaf_index(id->flbas); 1992 ns->lba_shift = id->lbaf[lbaf].ds; 1993 nvme_set_queue_limits(ns->ctrl, ns->queue); 1994 1995 nvme_configure_metadata(ns, id); 1996 nvme_set_chunk_sectors(ns, id); 1997 nvme_update_disk_info(ns->disk, ns, id); 1998 1999 if (ns->head->ids.csi == NVME_CSI_ZNS) { 2000 ret = nvme_update_zone_info(ns, lbaf); 2001 if (ret) { 2002 blk_mq_unfreeze_queue(ns->disk->queue); 2003 goto out; 2004 } 2005 } 2006 2007 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 2008 set_bit(NVME_NS_READY, &ns->flags); 2009 blk_mq_unfreeze_queue(ns->disk->queue); 2010 2011 if (blk_queue_is_zoned(ns->queue)) { 2012 ret = nvme_revalidate_zones(ns); 2013 if (ret && !nvme_first_scan(ns->disk)) 2014 goto out; 2015 } 2016 2017 if (nvme_ns_head_multipath(ns->head)) { 2018 blk_mq_freeze_queue(ns->head->disk->queue); 2019 nvme_update_disk_info(ns->head->disk, ns, id); 2020 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 2021 nvme_mpath_revalidate_paths(ns); 2022 blk_stack_limits(&ns->head->disk->queue->limits, 2023 &ns->queue->limits, 0); 2024 disk_update_readahead(ns->head->disk); 2025 blk_mq_unfreeze_queue(ns->head->disk->queue); 2026 } 2027 2028 ret = 0; 2029 out: 2030 /* 2031 * If probing fails due to an unsupported feature, hide the block device, 2032 * but still allow other access.
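 * ("Other access" here means e.g. the per-namespace ng* character device and
 * the passthrough ioctls, which do not depend on a usable block device.)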
2033 */ 2034 if (ret == -ENODEV) { 2035 ns->disk->flags |= GENHD_FL_HIDDEN; 2036 set_bit(NVME_NS_READY, &ns->flags); 2037 ret = 0; 2038 } 2039 kfree(id); 2040 return ret; 2041 } 2042 2043 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) 2044 { 2045 switch (info->ids.csi) { 2046 case NVME_CSI_ZNS: 2047 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { 2048 dev_info(ns->ctrl->device, 2049 "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", 2050 info->nsid); 2051 return nvme_update_ns_info_generic(ns, info); 2052 } 2053 return nvme_update_ns_info_block(ns, info); 2054 case NVME_CSI_NVM: 2055 return nvme_update_ns_info_block(ns, info); 2056 default: 2057 dev_info(ns->ctrl->device, 2058 "block device for nsid %u not supported (csi %u)\n", 2059 info->nsid, info->ids.csi); 2060 return nvme_update_ns_info_generic(ns, info); 2061 } 2062 } 2063 2064 static char nvme_pr_type(enum pr_type type) 2065 { 2066 switch (type) { 2067 case PR_WRITE_EXCLUSIVE: 2068 return 1; 2069 case PR_EXCLUSIVE_ACCESS: 2070 return 2; 2071 case PR_WRITE_EXCLUSIVE_REG_ONLY: 2072 return 3; 2073 case PR_EXCLUSIVE_ACCESS_REG_ONLY: 2074 return 4; 2075 case PR_WRITE_EXCLUSIVE_ALL_REGS: 2076 return 5; 2077 case PR_EXCLUSIVE_ACCESS_ALL_REGS: 2078 return 6; 2079 default: 2080 return 0; 2081 } 2082 } 2083 2084 static int nvme_send_ns_head_pr_command(struct block_device *bdev, 2085 struct nvme_command *c, u8 data[16]) 2086 { 2087 struct nvme_ns_head *head = bdev->bd_disk->private_data; 2088 int srcu_idx = srcu_read_lock(&head->srcu); 2089 struct nvme_ns *ns = nvme_find_path(head); 2090 int ret = -EWOULDBLOCK; 2091 2092 if (ns) { 2093 c->common.nsid = cpu_to_le32(ns->head->ns_id); 2094 ret = nvme_submit_sync_cmd(ns->queue, c, data, 16); 2095 } 2096 srcu_read_unlock(&head->srcu, srcu_idx); 2097 return ret; 2098 } 2099 2100 static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, 2101 u8 data[16]) 2102 { 2103 c->common.nsid = cpu_to_le32(ns->head->ns_id); 2104 return nvme_submit_sync_cmd(ns->queue, c, data, 16); 2105 } 2106 2107 static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 2108 u64 key, u64 sa_key, u8 op) 2109 { 2110 struct nvme_command c = { }; 2111 u8 data[16] = { 0, }; 2112 2113 put_unaligned_le64(key, &data[0]); 2114 put_unaligned_le64(sa_key, &data[8]); 2115 2116 c.common.opcode = op; 2117 c.common.cdw10 = cpu_to_le32(cdw10); 2118 2119 if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && 2120 bdev->bd_disk->fops == &nvme_ns_head_ops) 2121 return nvme_send_ns_head_pr_command(bdev, &c, data); 2122 return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data); 2123 } 2124 2125 static int nvme_pr_register(struct block_device *bdev, u64 old, 2126 u64 new, unsigned flags) 2127 { 2128 u32 cdw10; 2129 2130 if (flags & ~PR_FL_IGNORE_KEY) 2131 return -EOPNOTSUPP; 2132 2133 cdw10 = old ? 2 : 0; 2134 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; 2135 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 2136 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 2137 } 2138 2139 static int nvme_pr_reserve(struct block_device *bdev, u64 key, 2140 enum pr_type type, unsigned flags) 2141 { 2142 u32 cdw10; 2143 2144 if (flags & ~PR_FL_IGNORE_KEY) 2145 return -EOPNOTSUPP; 2146 2147 cdw10 = nvme_pr_type(type) << 8; 2148 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 
1 << 3 : 0); 2149 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); 2150 } 2151 2152 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, 2153 enum pr_type type, bool abort) 2154 { 2155 u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); 2156 2157 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); 2158 } 2159 2160 static int nvme_pr_clear(struct block_device *bdev, u64 key) 2161 { 2162 u32 cdw10 = 1 | (key ? 0 : 1 << 3); 2163 2164 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2165 } 2166 2167 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 2168 { 2169 u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3); 2170 2171 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2172 } 2173 2174 const struct pr_ops nvme_pr_ops = { 2175 .pr_register = nvme_pr_register, 2176 .pr_reserve = nvme_pr_reserve, 2177 .pr_release = nvme_pr_release, 2178 .pr_preempt = nvme_pr_preempt, 2179 .pr_clear = nvme_pr_clear, 2180 }; 2181 2182 #ifdef CONFIG_BLK_SED_OPAL 2183 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2184 bool send) 2185 { 2186 struct nvme_ctrl *ctrl = data; 2187 struct nvme_command cmd = { }; 2188 2189 if (send) 2190 cmd.common.opcode = nvme_admin_security_send; 2191 else 2192 cmd.common.opcode = nvme_admin_security_recv; 2193 cmd.common.nsid = 0; 2194 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2195 cmd.common.cdw11 = cpu_to_le32(len); 2196 2197 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 2198 NVME_QID_ANY, 1, 0); 2199 } 2200 EXPORT_SYMBOL_GPL(nvme_sec_submit); 2201 #endif /* CONFIG_BLK_SED_OPAL */ 2202 2203 #ifdef CONFIG_BLK_DEV_ZONED 2204 static int nvme_report_zones(struct gendisk *disk, sector_t sector, 2205 unsigned int nr_zones, report_zones_cb cb, void *data) 2206 { 2207 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, 2208 data); 2209 } 2210 #else 2211 #define nvme_report_zones NULL 2212 #endif /* CONFIG_BLK_DEV_ZONED */ 2213 2214 static const struct block_device_operations nvme_bdev_ops = { 2215 .owner = THIS_MODULE, 2216 .ioctl = nvme_ioctl, 2217 .compat_ioctl = blkdev_compat_ptr_ioctl, 2218 .open = nvme_open, 2219 .release = nvme_release, 2220 .getgeo = nvme_getgeo, 2221 .report_zones = nvme_report_zones, 2222 .pr_ops = &nvme_pr_ops, 2223 }; 2224 2225 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 timeout, bool enabled) 2226 { 2227 unsigned long timeout_jiffies = ((timeout + 1) * HZ / 2) + jiffies; 2228 u32 csts, bit = enabled ? NVME_CSTS_RDY : 0; 2229 int ret; 2230 2231 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2232 if (csts == ~0) 2233 return -ENODEV; 2234 if ((csts & NVME_CSTS_RDY) == bit) 2235 break; 2236 2237 usleep_range(1000, 2000); 2238 if (fatal_signal_pending(current)) 2239 return -EINTR; 2240 if (time_after(jiffies, timeout_jiffies)) { 2241 dev_err(ctrl->device, 2242 "Device not ready; aborting %s, CSTS=0x%x\n", 2243 enabled ? "initialisation" : "reset", csts); 2244 return -ENODEV; 2245 } 2246 } 2247 2248 return ret; 2249 } 2250 2251 /* 2252 * If the device has been passed off to us in an enabled state, just clear 2253 * the enabled bit. The spec says we should set the 'shutdown notification 2254 * bits', but doing so may cause the device to complete commands to the 2255 * admin queue ... and we don't know what memory that might be pointing at! 
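 * Instead we only clear CC.EN and then wait, via nvme_wait_ready() above, for
 * CSTS.RDY to drop to zero within the CAP.TO based timeout.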
2256 */ 2257 int nvme_disable_ctrl(struct nvme_ctrl *ctrl) 2258 { 2259 int ret; 2260 2261 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2262 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2263 2264 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2265 if (ret) 2266 return ret; 2267 2268 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2269 msleep(NVME_QUIRK_DELAY_AMOUNT); 2270 2271 return nvme_wait_ready(ctrl, NVME_CAP_TIMEOUT(ctrl->cap), false); 2272 } 2273 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2274 2275 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2276 { 2277 unsigned dev_page_min; 2278 u32 timeout; 2279 int ret; 2280 2281 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2282 if (ret) { 2283 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2284 return ret; 2285 } 2286 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2287 2288 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { 2289 dev_err(ctrl->device, 2290 "Minimum device page size %u too large for host (%u)\n", 2291 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); 2292 return -ENODEV; 2293 } 2294 2295 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) 2296 ctrl->ctrl_config = NVME_CC_CSS_CSI; 2297 else 2298 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2299 2300 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { 2301 u32 crto; 2302 2303 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); 2304 if (ret) { 2305 dev_err(ctrl->device, "Reading CRTO failed (%d)\n", 2306 ret); 2307 return ret; 2308 } 2309 2310 if (ctrl->cap & NVME_CAP_CRMS_CRIMS) { 2311 ctrl->ctrl_config |= NVME_CC_CRIME; 2312 timeout = NVME_CRTO_CRIMT(crto); 2313 } else { 2314 timeout = NVME_CRTO_CRWMT(crto); 2315 } 2316 } else { 2317 timeout = NVME_CAP_TIMEOUT(ctrl->cap); 2318 } 2319 2320 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 2321 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2322 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2323 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2324 if (ret) 2325 return ret; 2326 2327 /* Flush write to device (required if transport is PCI) */ 2328 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); 2329 if (ret) 2330 return ret; 2331 2332 ctrl->ctrl_config |= NVME_CC_ENABLE; 2333 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2334 if (ret) 2335 return ret; 2336 return nvme_wait_ready(ctrl, timeout, true); 2337 } 2338 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2339 2340 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) 2341 { 2342 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); 2343 u32 csts; 2344 int ret; 2345 2346 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2347 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2348 2349 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2350 if (ret) 2351 return ret; 2352 2353 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2354 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) 2355 break; 2356 2357 msleep(100); 2358 if (fatal_signal_pending(current)) 2359 return -EINTR; 2360 if (time_after(jiffies, timeout)) { 2361 dev_err(ctrl->device, 2362 "Device shutdown incomplete; abort shutdown\n"); 2363 return -ENODEV; 2364 } 2365 } 2366 2367 return ret; 2368 } 2369 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); 2370 2371 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2372 { 2373 __le64 ts; 2374 int ret; 2375 2376 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2377 return 0; 2378 2379 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2380 ret = 
nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2381 NULL); 2382 if (ret) 2383 dev_warn_once(ctrl->device, 2384 "could not set timestamp (%d)\n", ret); 2385 return ret; 2386 } 2387 2388 static int nvme_configure_host_options(struct nvme_ctrl *ctrl) 2389 { 2390 struct nvme_feat_host_behavior *host; 2391 u8 acre = 0, lbafee = 0; 2392 int ret; 2393 2394 /* Don't bother enabling the feature if retry delay is not reported */ 2395 if (ctrl->crdt[0]) 2396 acre = NVME_ENABLE_ACRE; 2397 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) 2398 lbafee = NVME_ENABLE_LBAFEE; 2399 2400 if (!acre && !lbafee) 2401 return 0; 2402 2403 host = kzalloc(sizeof(*host), GFP_KERNEL); 2404 if (!host) 2405 return 0; 2406 2407 host->acre = acre; 2408 host->lbafee = lbafee; 2409 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2410 host, sizeof(*host), NULL); 2411 kfree(host); 2412 return ret; 2413 } 2414 2415 /* 2416 * The function checks whether the given total (exlat + enlat) latency of 2417 * a power state allows the latter to be used as an APST transition target. 2418 * It does so by comparing the latency to the primary and secondary latency 2419 * tolerances defined by module params. If there's a match, the corresponding 2420 * timeout value is returned and the matching tolerance index (1 or 2) is 2421 * reported. 2422 */ 2423 static bool nvme_apst_get_transition_time(u64 total_latency, 2424 u64 *transition_time, unsigned *last_index) 2425 { 2426 if (total_latency <= apst_primary_latency_tol_us) { 2427 if (*last_index == 1) 2428 return false; 2429 *last_index = 1; 2430 *transition_time = apst_primary_timeout_ms; 2431 return true; 2432 } 2433 if (apst_secondary_timeout_ms && 2434 total_latency <= apst_secondary_latency_tol_us) { 2435 if (*last_index <= 2) 2436 return false; 2437 *last_index = 2; 2438 *transition_time = apst_secondary_timeout_ms; 2439 return true; 2440 } 2441 return false; 2442 } 2443 2444 /* 2445 * APST (Autonomous Power State Transition) lets us program a table of power 2446 * state transitions that the controller will perform automatically. 2447 * 2448 * Depending on module params, one of the two supported techniques will be used: 2449 * 2450 * - If the parameters provide explicit timeouts and tolerances, they will be 2451 * used to build a table with up to 2 non-operational states to transition to. 2452 * The default parameter values were selected based on the values used by 2453 * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic 2454 * regeneration of the APST table in the event of switching between external 2455 * and battery power, the timeouts and tolerances reflect a compromise 2456 * between values used by Microsoft for AC and battery scenarios. 2457 * - If not, we'll configure the table with a simple heuristic: we are willing 2458 * to spend at most 2% of the time transitioning between power states. 2459 * Therefore, when running in any given state, we will enter the next 2460 * lower-power non-operational state after waiting 50 * (enlat + exlat) 2461 * microseconds, as long as that state's exit latency is under the requested 2462 * maximum latency. 2463 * 2464 * We will not autonomously enter any non-operational state for which the total 2465 * latency exceeds ps_max_latency_us. 2466 * 2467 * Users can set ps_max_latency_us to zero to turn off APST. 
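 * A worked example for the heuristic path (hypothetical latencies): a
 * non-operational state with enlat = 5,000 us and exlat = 15,000 us has a
 * total latency of 20,000 us, so the code below programs an idle timeout of
 * roughly 20,000 / 20 = 1,000 ms (i.e. 50 * (enlat + exlat)), provided the
 * 15,000 us exit latency does not exceed ps_max_latency_us.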
2468 */ 2469 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2470 { 2471 struct nvme_feat_auto_pst *table; 2472 unsigned apste = 0; 2473 u64 max_lat_us = 0; 2474 __le64 target = 0; 2475 int max_ps = -1; 2476 int state; 2477 int ret; 2478 unsigned last_lt_index = UINT_MAX; 2479 2480 /* 2481 * If APST isn't supported or if we haven't been initialized yet, 2482 * then don't do anything. 2483 */ 2484 if (!ctrl->apsta) 2485 return 0; 2486 2487 if (ctrl->npss > 31) { 2488 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2489 return 0; 2490 } 2491 2492 table = kzalloc(sizeof(*table), GFP_KERNEL); 2493 if (!table) 2494 return 0; 2495 2496 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2497 /* Turn off APST. */ 2498 dev_dbg(ctrl->device, "APST disabled\n"); 2499 goto done; 2500 } 2501 2502 /* 2503 * Walk through all states from lowest- to highest-power. 2504 * According to the spec, lower-numbered states use more power. NPSS, 2505 * despite the name, is the index of the lowest-power state, not the 2506 * number of states. 2507 */ 2508 for (state = (int)ctrl->npss; state >= 0; state--) { 2509 u64 total_latency_us, exit_latency_us, transition_ms; 2510 2511 if (target) 2512 table->entries[state] = target; 2513 2514 /* 2515 * Don't allow transitions to the deepest state if it's quirked 2516 * off. 2517 */ 2518 if (state == ctrl->npss && 2519 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2520 continue; 2521 2522 /* 2523 * Is this state a useful non-operational state for higher-power 2524 * states to autonomously transition to? 2525 */ 2526 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) 2527 continue; 2528 2529 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2530 if (exit_latency_us > ctrl->ps_max_latency_us) 2531 continue; 2532 2533 total_latency_us = exit_latency_us + 2534 le32_to_cpu(ctrl->psd[state].entry_lat); 2535 2536 /* 2537 * This state is good. It can be used as the APST idle target 2538 * for higher power states. 
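 * Each APST table entry encodes the Idle Transition Power State in bits 07:03
 * and the Idle Time Prior to Transition (in ms) in bits 31:08, which is what
 * the (state << 3) | (transition_ms << 8) value below builds.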
2539 */ 2540 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { 2541 if (!nvme_apst_get_transition_time(total_latency_us, 2542 &transition_ms, &last_lt_index)) 2543 continue; 2544 } else { 2545 transition_ms = total_latency_us + 19; 2546 do_div(transition_ms, 20); 2547 if (transition_ms > (1 << 24) - 1) 2548 transition_ms = (1 << 24) - 1; 2549 } 2550 2551 target = cpu_to_le64((state << 3) | (transition_ms << 8)); 2552 if (max_ps == -1) 2553 max_ps = state; 2554 if (total_latency_us > max_lat_us) 2555 max_lat_us = total_latency_us; 2556 } 2557 2558 if (max_ps == -1) 2559 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2560 else 2561 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2562 max_ps, max_lat_us, (int)sizeof(*table), table); 2563 apste = 1; 2564 2565 done: 2566 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2567 table, sizeof(*table), NULL); 2568 if (ret) 2569 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2570 kfree(table); 2571 return ret; 2572 } 2573 2574 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2575 { 2576 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2577 u64 latency; 2578 2579 switch (val) { 2580 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2581 case PM_QOS_LATENCY_ANY: 2582 latency = U64_MAX; 2583 break; 2584 2585 default: 2586 latency = val; 2587 } 2588 2589 if (ctrl->ps_max_latency_us != latency) { 2590 ctrl->ps_max_latency_us = latency; 2591 if (ctrl->state == NVME_CTRL_LIVE) 2592 nvme_configure_apst(ctrl); 2593 } 2594 } 2595 2596 struct nvme_core_quirk_entry { 2597 /* 2598 * NVMe model and firmware strings are padded with spaces. For 2599 * simplicity, strings in the quirk table are padded with NULLs 2600 * instead. 2601 */ 2602 u16 vid; 2603 const char *mn; 2604 const char *fr; 2605 unsigned long quirks; 2606 }; 2607 2608 static const struct nvme_core_quirk_entry core_quirks[] = { 2609 { 2610 /* 2611 * This Toshiba device seems to die using any APST states. See: 2612 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2613 */ 2614 .vid = 0x1179, 2615 .mn = "THNSF5256GPUK TOSHIBA", 2616 .quirks = NVME_QUIRK_NO_APST, 2617 }, 2618 { 2619 /* 2620 * This LiteON CL1-3D*-Q11 firmware version has a race 2621 * condition associated with actions related to suspend to idle 2622 * LiteON has resolved the problem in future firmware 2623 */ 2624 .vid = 0x14a4, 2625 .fr = "22301111", 2626 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2627 }, 2628 { 2629 /* 2630 * This Kioxia CD6-V Series / HPE PE8030 device times out and 2631 * aborts I/O during any load, but more easily reproducible 2632 * with discards (fstrim). 2633 * 2634 * The device is left in a state where it is also not possible 2635 * to use "nvme set-feature" to disable APST, but booting with 2636 * nvme_core.default_ps_max_latency=0 works. 2637 */ 2638 .vid = 0x1e0f, 2639 .mn = "KCD6XVUL6T40", 2640 .quirks = NVME_QUIRK_NO_APST, 2641 }, 2642 { 2643 /* 2644 * The external Samsung X5 SSD fails initialization without a 2645 * delay before checking if it is ready and has a whole set of 2646 * other problems. To make this even more interesting, it 2647 * shares the PCI ID with internal Samsung 970 Evo Plus that 2648 * does not need or want these quirks. 
2649 */ 2650 .vid = 0x144d, 2651 .mn = "Samsung Portable SSD X5", 2652 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 2653 NVME_QUIRK_NO_DEEPEST_PS | 2654 NVME_QUIRK_IGNORE_DEV_SUBNQN, 2655 } 2656 }; 2657 2658 /* match is null-terminated but idstr is space-padded. */ 2659 static bool string_matches(const char *idstr, const char *match, size_t len) 2660 { 2661 size_t matchlen; 2662 2663 if (!match) 2664 return true; 2665 2666 matchlen = strlen(match); 2667 WARN_ON_ONCE(matchlen > len); 2668 2669 if (memcmp(idstr, match, matchlen)) 2670 return false; 2671 2672 for (; matchlen < len; matchlen++) 2673 if (idstr[matchlen] != ' ') 2674 return false; 2675 2676 return true; 2677 } 2678 2679 static bool quirk_matches(const struct nvme_id_ctrl *id, 2680 const struct nvme_core_quirk_entry *q) 2681 { 2682 return q->vid == le16_to_cpu(id->vid) && 2683 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2684 string_matches(id->fr, q->fr, sizeof(id->fr)); 2685 } 2686 2687 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2688 struct nvme_id_ctrl *id) 2689 { 2690 size_t nqnlen; 2691 int off; 2692 2693 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2694 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2695 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2696 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2697 return; 2698 } 2699 2700 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2701 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2702 } 2703 2704 /* 2705 * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe 2706 * Base Specification 2.0. It is slightly different from the format 2707 * specified there due to historic reasons, and we can't change it now. 2708 */ 2709 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2710 "nqn.2014.08.org.nvmexpress:%04x%04x", 2711 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2712 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2713 off += sizeof(id->sn); 2714 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2715 off += sizeof(id->mn); 2716 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2717 } 2718 2719 static void nvme_release_subsystem(struct device *dev) 2720 { 2721 struct nvme_subsystem *subsys = 2722 container_of(dev, struct nvme_subsystem, dev); 2723 2724 if (subsys->instance >= 0) 2725 ida_free(&nvme_instance_ida, subsys->instance); 2726 kfree(subsys); 2727 } 2728 2729 static void nvme_destroy_subsystem(struct kref *ref) 2730 { 2731 struct nvme_subsystem *subsys = 2732 container_of(ref, struct nvme_subsystem, ref); 2733 2734 mutex_lock(&nvme_subsystems_lock); 2735 list_del(&subsys->entry); 2736 mutex_unlock(&nvme_subsystems_lock); 2737 2738 ida_destroy(&subsys->ns_ida); 2739 device_del(&subsys->dev); 2740 put_device(&subsys->dev); 2741 } 2742 2743 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2744 { 2745 kref_put(&subsys->ref, nvme_destroy_subsystem); 2746 } 2747 2748 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2749 { 2750 struct nvme_subsystem *subsys; 2751 2752 lockdep_assert_held(&nvme_subsystems_lock); 2753 2754 /* 2755 * Fail matches for discovery subsystems. This results 2756 * in each discovery controller bound to a unique subsystem. 2757 * This avoids issues with validating controller values 2758 * that can only be true when there is a single unique subsystem. 2759 * There may be multiple and completely independent entities 2760 * that provide discovery controllers. 
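 * (Several independent fabrics may, for instance, all expose the well-known
 * discovery NQN nqn.2014-08.org.nvmexpress.discovery.)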
2761 */ 2762 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2763 return NULL; 2764 2765 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2766 if (strcmp(subsys->subnqn, subsysnqn)) 2767 continue; 2768 if (!kref_get_unless_zero(&subsys->ref)) 2769 continue; 2770 return subsys; 2771 } 2772 2773 return NULL; 2774 } 2775 2776 #define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2777 struct device_attribute subsys_attr_##_name = \ 2778 __ATTR(_name, _mode, _show, NULL) 2779 2780 static ssize_t nvme_subsys_show_nqn(struct device *dev, 2781 struct device_attribute *attr, 2782 char *buf) 2783 { 2784 struct nvme_subsystem *subsys = 2785 container_of(dev, struct nvme_subsystem, dev); 2786 2787 return sysfs_emit(buf, "%s\n", subsys->subnqn); 2788 } 2789 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2790 2791 static ssize_t nvme_subsys_show_type(struct device *dev, 2792 struct device_attribute *attr, 2793 char *buf) 2794 { 2795 struct nvme_subsystem *subsys = 2796 container_of(dev, struct nvme_subsystem, dev); 2797 2798 switch (subsys->subtype) { 2799 case NVME_NQN_DISC: 2800 return sysfs_emit(buf, "discovery\n"); 2801 case NVME_NQN_NVME: 2802 return sysfs_emit(buf, "nvm\n"); 2803 default: 2804 return sysfs_emit(buf, "reserved\n"); 2805 } 2806 } 2807 static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type); 2808 2809 #define nvme_subsys_show_str_function(field) \ 2810 static ssize_t subsys_##field##_show(struct device *dev, \ 2811 struct device_attribute *attr, char *buf) \ 2812 { \ 2813 struct nvme_subsystem *subsys = \ 2814 container_of(dev, struct nvme_subsystem, dev); \ 2815 return sysfs_emit(buf, "%.*s\n", \ 2816 (int)sizeof(subsys->field), subsys->field); \ 2817 } \ 2818 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2819 2820 nvme_subsys_show_str_function(model); 2821 nvme_subsys_show_str_function(serial); 2822 nvme_subsys_show_str_function(firmware_rev); 2823 2824 static struct attribute *nvme_subsys_attrs[] = { 2825 &subsys_attr_model.attr, 2826 &subsys_attr_serial.attr, 2827 &subsys_attr_firmware_rev.attr, 2828 &subsys_attr_subsysnqn.attr, 2829 &subsys_attr_subsystype.attr, 2830 #ifdef CONFIG_NVME_MULTIPATH 2831 &subsys_attr_iopolicy.attr, 2832 #endif 2833 NULL, 2834 }; 2835 2836 static const struct attribute_group nvme_subsys_attrs_group = { 2837 .attrs = nvme_subsys_attrs, 2838 }; 2839 2840 static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2841 &nvme_subsys_attrs_group, 2842 NULL, 2843 }; 2844 2845 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) 2846 { 2847 return ctrl->opts && ctrl->opts->discovery_nqn; 2848 } 2849 2850 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2851 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2852 { 2853 struct nvme_ctrl *tmp; 2854 2855 lockdep_assert_held(&nvme_subsystems_lock); 2856 2857 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2858 if (nvme_state_terminal(tmp)) 2859 continue; 2860 2861 if (tmp->cntlid == ctrl->cntlid) { 2862 dev_err(ctrl->device, 2863 "Duplicate cntlid %u with %s, subsys %s, rejecting\n", 2864 ctrl->cntlid, dev_name(tmp->device), 2865 subsys->subnqn); 2866 return false; 2867 } 2868 2869 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 2870 nvme_discovery_ctrl(ctrl)) 2871 continue; 2872 2873 dev_err(ctrl->device, 2874 "Subsystem does not support multiple controllers\n"); 2875 return false; 2876 } 2877 2878 return true; 2879 } 2880 2881 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2882 { 2883 struct 
nvme_subsystem *subsys, *found; 2884 int ret; 2885 2886 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2887 if (!subsys) 2888 return -ENOMEM; 2889 2890 subsys->instance = -1; 2891 mutex_init(&subsys->lock); 2892 kref_init(&subsys->ref); 2893 INIT_LIST_HEAD(&subsys->ctrls); 2894 INIT_LIST_HEAD(&subsys->nsheads); 2895 nvme_init_subnqn(subsys, ctrl, id); 2896 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2897 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2898 subsys->vendor_id = le16_to_cpu(id->vid); 2899 subsys->cmic = id->cmic; 2900 2901 /* Versions prior to 1.4 don't necessarily report a valid type */ 2902 if (id->cntrltype == NVME_CTRL_DISC || 2903 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) 2904 subsys->subtype = NVME_NQN_DISC; 2905 else 2906 subsys->subtype = NVME_NQN_NVME; 2907 2908 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { 2909 dev_err(ctrl->device, 2910 "Subsystem %s is not a discovery controller", 2911 subsys->subnqn); 2912 kfree(subsys); 2913 return -EINVAL; 2914 } 2915 subsys->awupf = le16_to_cpu(id->awupf); 2916 nvme_mpath_default_iopolicy(subsys); 2917 2918 subsys->dev.class = nvme_subsys_class; 2919 subsys->dev.release = nvme_release_subsystem; 2920 subsys->dev.groups = nvme_subsys_attrs_groups; 2921 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 2922 device_initialize(&subsys->dev); 2923 2924 mutex_lock(&nvme_subsystems_lock); 2925 found = __nvme_find_get_subsystem(subsys->subnqn); 2926 if (found) { 2927 put_device(&subsys->dev); 2928 subsys = found; 2929 2930 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2931 ret = -EINVAL; 2932 goto out_put_subsystem; 2933 } 2934 } else { 2935 ret = device_add(&subsys->dev); 2936 if (ret) { 2937 dev_err(ctrl->device, 2938 "failed to register subsystem device.\n"); 2939 put_device(&subsys->dev); 2940 goto out_unlock; 2941 } 2942 ida_init(&subsys->ns_ida); 2943 list_add_tail(&subsys->entry, &nvme_subsystems); 2944 } 2945 2946 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2947 dev_name(ctrl->device)); 2948 if (ret) { 2949 dev_err(ctrl->device, 2950 "failed to create sysfs link from subsystem.\n"); 2951 goto out_put_subsystem; 2952 } 2953 2954 if (!found) 2955 subsys->instance = ctrl->instance; 2956 ctrl->subsys = subsys; 2957 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2958 mutex_unlock(&nvme_subsystems_lock); 2959 return 0; 2960 2961 out_put_subsystem: 2962 nvme_put_subsystem(subsys); 2963 out_unlock: 2964 mutex_unlock(&nvme_subsystems_lock); 2965 return ret; 2966 } 2967 2968 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, 2969 void *log, size_t size, u64 offset) 2970 { 2971 struct nvme_command c = { }; 2972 u32 dwlen = nvme_bytes_to_numd(size); 2973 2974 c.get_log_page.opcode = nvme_admin_get_log_page; 2975 c.get_log_page.nsid = cpu_to_le32(nsid); 2976 c.get_log_page.lid = log_page; 2977 c.get_log_page.lsp = lsp; 2978 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2979 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2980 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2981 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2982 c.get_log_page.csi = csi; 2983 2984 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2985 } 2986 2987 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, 2988 struct nvme_effects_log **log) 2989 { 2990 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); 2991 int ret; 2992 2993 if (cel) 2994 goto out; 2995 2996 cel = kzalloc(sizeof(*cel), 
GFP_KERNEL); 2997 if (!cel) 2998 return -ENOMEM; 2999 3000 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, 3001 cel, sizeof(*cel), 0); 3002 if (ret) { 3003 kfree(cel); 3004 return ret; 3005 } 3006 3007 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); 3008 out: 3009 *log = cel; 3010 return 0; 3011 } 3012 3013 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units) 3014 { 3015 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; 3016 3017 if (check_shl_overflow(1U, units + page_shift - 9, &val)) 3018 return UINT_MAX; 3019 return val; 3020 } 3021 3022 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) 3023 { 3024 struct nvme_command c = { }; 3025 struct nvme_id_ctrl_nvm *id; 3026 int ret; 3027 3028 if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { 3029 ctrl->max_discard_sectors = UINT_MAX; 3030 ctrl->max_discard_segments = NVME_DSM_MAX_RANGES; 3031 } else { 3032 ctrl->max_discard_sectors = 0; 3033 ctrl->max_discard_segments = 0; 3034 } 3035 3036 /* 3037 * Even though NVMe spec explicitly states that MDTS is not applicable 3038 * to the write-zeroes, we are cautious and limit the size to the 3039 * controllers max_hw_sectors value, which is based on the MDTS field 3040 * and possibly other limiting factors. 3041 */ 3042 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && 3043 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) 3044 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors; 3045 else 3046 ctrl->max_zeroes_sectors = 0; 3047 3048 if (nvme_ctrl_limited_cns(ctrl)) 3049 return 0; 3050 3051 id = kzalloc(sizeof(*id), GFP_KERNEL); 3052 if (!id) 3053 return 0; 3054 3055 c.identify.opcode = nvme_admin_identify; 3056 c.identify.cns = NVME_ID_CNS_CS_CTRL; 3057 c.identify.csi = NVME_CSI_NVM; 3058 3059 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 3060 if (ret) 3061 goto free_data; 3062 3063 if (id->dmrl) 3064 ctrl->max_discard_segments = id->dmrl; 3065 ctrl->dmrsl = le32_to_cpu(id->dmrsl); 3066 if (id->wzsl) 3067 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); 3068 3069 free_data: 3070 kfree(id); 3071 return ret; 3072 } 3073 3074 static int nvme_init_identify(struct nvme_ctrl *ctrl) 3075 { 3076 struct nvme_id_ctrl *id; 3077 u32 max_hw_sectors; 3078 bool prev_apst_enabled; 3079 int ret; 3080 3081 ret = nvme_identify_ctrl(ctrl, &id); 3082 if (ret) { 3083 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 3084 return -EIO; 3085 } 3086 3087 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 3088 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); 3089 if (ret < 0) 3090 goto out_free; 3091 } 3092 3093 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 3094 ctrl->cntlid = le16_to_cpu(id->cntlid); 3095 3096 if (!ctrl->identified) { 3097 unsigned int i; 3098 3099 ret = nvme_init_subsystem(ctrl, id); 3100 if (ret) 3101 goto out_free; 3102 3103 /* 3104 * Check for quirks. Quirk can depend on firmware version, 3105 * so, in principle, the set of quirks present can change 3106 * across a reset. As a possible future enhancement, we 3107 * could re-scan for quirks every time we reinitialize 3108 * the device, but we'd have to make sure that the driver 3109 * behaves intelligently if the quirks change. 
3110 */ 3111 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 3112 if (quirk_matches(id, &core_quirks[i])) 3113 ctrl->quirks |= core_quirks[i].quirks; 3114 } 3115 } 3116 memcpy(ctrl->subsys->firmware_rev, id->fr, 3117 sizeof(ctrl->subsys->firmware_rev)); 3118 3119 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 3120 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 3121 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 3122 } 3123 3124 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 3125 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 3126 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 3127 3128 ctrl->oacs = le16_to_cpu(id->oacs); 3129 ctrl->oncs = le16_to_cpu(id->oncs); 3130 ctrl->mtfa = le16_to_cpu(id->mtfa); 3131 ctrl->oaes = le32_to_cpu(id->oaes); 3132 ctrl->wctemp = le16_to_cpu(id->wctemp); 3133 ctrl->cctemp = le16_to_cpu(id->cctemp); 3134 3135 atomic_set(&ctrl->abort_limit, id->acl + 1); 3136 ctrl->vwc = id->vwc; 3137 if (id->mdts) 3138 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); 3139 else 3140 max_hw_sectors = UINT_MAX; 3141 ctrl->max_hw_sectors = 3142 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 3143 3144 nvme_set_queue_limits(ctrl, ctrl->admin_q); 3145 ctrl->sgls = le32_to_cpu(id->sgls); 3146 ctrl->kas = le16_to_cpu(id->kas); 3147 ctrl->max_namespaces = le32_to_cpu(id->mnan); 3148 ctrl->ctratt = le32_to_cpu(id->ctratt); 3149 3150 ctrl->cntrltype = id->cntrltype; 3151 ctrl->dctype = id->dctype; 3152 3153 if (id->rtd3e) { 3154 /* us -> s */ 3155 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; 3156 3157 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 3158 shutdown_timeout, 60); 3159 3160 if (ctrl->shutdown_timeout != shutdown_timeout) 3161 dev_info(ctrl->device, 3162 "Shutdown timeout set to %u seconds\n", 3163 ctrl->shutdown_timeout); 3164 } else 3165 ctrl->shutdown_timeout = shutdown_timeout; 3166 3167 ctrl->npss = id->npss; 3168 ctrl->apsta = id->apsta; 3169 prev_apst_enabled = ctrl->apst_enabled; 3170 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 3171 if (force_apst && id->apsta) { 3172 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 3173 ctrl->apst_enabled = true; 3174 } else { 3175 ctrl->apst_enabled = false; 3176 } 3177 } else { 3178 ctrl->apst_enabled = id->apsta; 3179 } 3180 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 3181 3182 if (ctrl->ops->flags & NVME_F_FABRICS) { 3183 ctrl->icdoff = le16_to_cpu(id->icdoff); 3184 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 3185 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 3186 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 3187 3188 /* 3189 * In fabrics we need to verify the cntlid matches the 3190 * admin connect 3191 */ 3192 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 3193 dev_err(ctrl->device, 3194 "Mismatching cntlid: Connect %u vs Identify " 3195 "%u, rejecting\n", 3196 ctrl->cntlid, le16_to_cpu(id->cntlid)); 3197 ret = -EINVAL; 3198 goto out_free; 3199 } 3200 3201 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { 3202 dev_err(ctrl->device, 3203 "keep-alive support is mandatory for fabrics\n"); 3204 ret = -EINVAL; 3205 goto out_free; 3206 } 3207 } else { 3208 ctrl->hmpre = le32_to_cpu(id->hmpre); 3209 ctrl->hmmin = le32_to_cpu(id->hmmin); 3210 ctrl->hmminds = le32_to_cpu(id->hmminds); 3211 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 3212 } 3213 3214 ret = nvme_mpath_init_identify(ctrl, id); 3215 if (ret < 0) 3216 goto out_free; 3217 3218 if (ctrl->apst_enabled && !prev_apst_enabled) 3219 
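/*
 * Expose the PM QoS latency tolerance attribute only while APST is in use;
 * user space writes to it end up in nvme_set_latency_tolerance() above and
 * thus update ps_max_latency_us.
 */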
dev_pm_qos_expose_latency_tolerance(ctrl->device); 3220 else if (!ctrl->apst_enabled && prev_apst_enabled) 3221 dev_pm_qos_hide_latency_tolerance(ctrl->device); 3222 3223 out_free: 3224 kfree(id); 3225 return ret; 3226 } 3227 3228 /* 3229 * Initialize the cached copies of the Identify data and various controller 3230 * register in our nvme_ctrl structure. This should be called as soon as 3231 * the admin queue is fully up and running. 3232 */ 3233 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl) 3234 { 3235 int ret; 3236 3237 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 3238 if (ret) { 3239 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 3240 return ret; 3241 } 3242 3243 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 3244 3245 if (ctrl->vs >= NVME_VS(1, 1, 0)) 3246 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 3247 3248 ret = nvme_init_identify(ctrl); 3249 if (ret) 3250 return ret; 3251 3252 ret = nvme_configure_apst(ctrl); 3253 if (ret < 0) 3254 return ret; 3255 3256 ret = nvme_configure_timestamp(ctrl); 3257 if (ret < 0) 3258 return ret; 3259 3260 ret = nvme_configure_host_options(ctrl); 3261 if (ret < 0) 3262 return ret; 3263 3264 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { 3265 /* 3266 * Do not return errors unless we are in a controller reset, 3267 * the controller works perfectly fine without hwmon. 3268 */ 3269 ret = nvme_hwmon_init(ctrl); 3270 if (ret == -EINTR) 3271 return ret; 3272 } 3273 3274 ctrl->identified = true; 3275 3276 return 0; 3277 } 3278 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); 3279 3280 static int nvme_dev_open(struct inode *inode, struct file *file) 3281 { 3282 struct nvme_ctrl *ctrl = 3283 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3284 3285 switch (ctrl->state) { 3286 case NVME_CTRL_LIVE: 3287 break; 3288 default: 3289 return -EWOULDBLOCK; 3290 } 3291 3292 nvme_get_ctrl(ctrl); 3293 if (!try_module_get(ctrl->ops->module)) { 3294 nvme_put_ctrl(ctrl); 3295 return -EINVAL; 3296 } 3297 3298 file->private_data = ctrl; 3299 return 0; 3300 } 3301 3302 static int nvme_dev_release(struct inode *inode, struct file *file) 3303 { 3304 struct nvme_ctrl *ctrl = 3305 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3306 3307 module_put(ctrl->ops->module); 3308 nvme_put_ctrl(ctrl); 3309 return 0; 3310 } 3311 3312 static const struct file_operations nvme_dev_fops = { 3313 .owner = THIS_MODULE, 3314 .open = nvme_dev_open, 3315 .release = nvme_dev_release, 3316 .unlocked_ioctl = nvme_dev_ioctl, 3317 .compat_ioctl = compat_ptr_ioctl, 3318 .uring_cmd = nvme_dev_uring_cmd, 3319 }; 3320 3321 static ssize_t nvme_sysfs_reset(struct device *dev, 3322 struct device_attribute *attr, const char *buf, 3323 size_t count) 3324 { 3325 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3326 int ret; 3327 3328 ret = nvme_reset_ctrl_sync(ctrl); 3329 if (ret < 0) 3330 return ret; 3331 return count; 3332 } 3333 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 3334 3335 static ssize_t nvme_sysfs_rescan(struct device *dev, 3336 struct device_attribute *attr, const char *buf, 3337 size_t count) 3338 { 3339 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3340 3341 nvme_queue_scan(ctrl); 3342 return count; 3343 } 3344 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 3345 3346 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 3347 { 3348 struct gendisk *disk = dev_to_disk(dev); 3349 3350 if (disk->fops == &nvme_bdev_ops) 3351 return nvme_get_ns_from_dev(dev)->head; 3352 else 3353 
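/* for the multipath head disk, private_data already is the ns_head */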
return disk->private_data; 3354 } 3355 3356 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 3357 char *buf) 3358 { 3359 struct nvme_ns_head *head = dev_to_ns_head(dev); 3360 struct nvme_ns_ids *ids = &head->ids; 3361 struct nvme_subsystem *subsys = head->subsys; 3362 int serial_len = sizeof(subsys->serial); 3363 int model_len = sizeof(subsys->model); 3364 3365 if (!uuid_is_null(&ids->uuid)) 3366 return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid); 3367 3368 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3369 return sysfs_emit(buf, "eui.%16phN\n", ids->nguid); 3370 3371 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3372 return sysfs_emit(buf, "eui.%8phN\n", ids->eui64); 3373 3374 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 3375 subsys->serial[serial_len - 1] == '\0')) 3376 serial_len--; 3377 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 3378 subsys->model[model_len - 1] == '\0')) 3379 model_len--; 3380 3381 return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 3382 serial_len, subsys->serial, model_len, subsys->model, 3383 head->ns_id); 3384 } 3385 static DEVICE_ATTR_RO(wwid); 3386 3387 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 3388 char *buf) 3389 { 3390 return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 3391 } 3392 static DEVICE_ATTR_RO(nguid); 3393 3394 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 3395 char *buf) 3396 { 3397 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3398 3399 /* For backward compatibility expose the NGUID to userspace if 3400 * we have no UUID set 3401 */ 3402 if (uuid_is_null(&ids->uuid)) { 3403 dev_warn_ratelimited(dev, 3404 "No UUID available providing old NGUID\n"); 3405 return sysfs_emit(buf, "%pU\n", ids->nguid); 3406 } 3407 return sysfs_emit(buf, "%pU\n", &ids->uuid); 3408 } 3409 static DEVICE_ATTR_RO(uuid); 3410 3411 static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 3412 char *buf) 3413 { 3414 return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 3415 } 3416 static DEVICE_ATTR_RO(eui); 3417 3418 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 3419 char *buf) 3420 { 3421 return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id); 3422 } 3423 static DEVICE_ATTR_RO(nsid); 3424 3425 static struct attribute *nvme_ns_id_attrs[] = { 3426 &dev_attr_wwid.attr, 3427 &dev_attr_uuid.attr, 3428 &dev_attr_nguid.attr, 3429 &dev_attr_eui.attr, 3430 &dev_attr_nsid.attr, 3431 #ifdef CONFIG_NVME_MULTIPATH 3432 &dev_attr_ana_grpid.attr, 3433 &dev_attr_ana_state.attr, 3434 #endif 3435 NULL, 3436 }; 3437 3438 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 3439 struct attribute *a, int n) 3440 { 3441 struct device *dev = container_of(kobj, struct device, kobj); 3442 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3443 3444 if (a == &dev_attr_uuid.attr) { 3445 if (uuid_is_null(&ids->uuid) && 3446 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3447 return 0; 3448 } 3449 if (a == &dev_attr_nguid.attr) { 3450 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3451 return 0; 3452 } 3453 if (a == &dev_attr_eui.attr) { 3454 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3455 return 0; 3456 } 3457 #ifdef CONFIG_NVME_MULTIPATH 3458 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { 3459 if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */ 3460 return 0; 3461 if 
(!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) 3462 return 0; 3463 } 3464 #endif 3465 return a->mode; 3466 } 3467 3468 static const struct attribute_group nvme_ns_id_attr_group = { 3469 .attrs = nvme_ns_id_attrs, 3470 .is_visible = nvme_ns_id_attrs_are_visible, 3471 }; 3472 3473 const struct attribute_group *nvme_ns_id_attr_groups[] = { 3474 &nvme_ns_id_attr_group, 3475 NULL, 3476 }; 3477 3478 #define nvme_show_str_function(field) \ 3479 static ssize_t field##_show(struct device *dev, \ 3480 struct device_attribute *attr, char *buf) \ 3481 { \ 3482 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3483 return sysfs_emit(buf, "%.*s\n", \ 3484 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 3485 } \ 3486 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3487 3488 nvme_show_str_function(model); 3489 nvme_show_str_function(serial); 3490 nvme_show_str_function(firmware_rev); 3491 3492 #define nvme_show_int_function(field) \ 3493 static ssize_t field##_show(struct device *dev, \ 3494 struct device_attribute *attr, char *buf) \ 3495 { \ 3496 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3497 return sysfs_emit(buf, "%d\n", ctrl->field); \ 3498 } \ 3499 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3500 3501 nvme_show_int_function(cntlid); 3502 nvme_show_int_function(numa_node); 3503 nvme_show_int_function(queue_count); 3504 nvme_show_int_function(sqsize); 3505 nvme_show_int_function(kato); 3506 3507 static ssize_t nvme_sysfs_delete(struct device *dev, 3508 struct device_attribute *attr, const char *buf, 3509 size_t count) 3510 { 3511 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3512 3513 if (device_remove_file_self(dev, attr)) 3514 nvme_delete_ctrl_sync(ctrl); 3515 return count; 3516 } 3517 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 3518 3519 static ssize_t nvme_sysfs_show_transport(struct device *dev, 3520 struct device_attribute *attr, 3521 char *buf) 3522 { 3523 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3524 3525 return sysfs_emit(buf, "%s\n", ctrl->ops->name); 3526 } 3527 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 3528 3529 static ssize_t nvme_sysfs_show_state(struct device *dev, 3530 struct device_attribute *attr, 3531 char *buf) 3532 { 3533 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3534 static const char *const state_name[] = { 3535 [NVME_CTRL_NEW] = "new", 3536 [NVME_CTRL_LIVE] = "live", 3537 [NVME_CTRL_RESETTING] = "resetting", 3538 [NVME_CTRL_CONNECTING] = "connecting", 3539 [NVME_CTRL_DELETING] = "deleting", 3540 [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)", 3541 [NVME_CTRL_DEAD] = "dead", 3542 }; 3543 3544 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 3545 state_name[ctrl->state]) 3546 return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); 3547 3548 return sysfs_emit(buf, "unknown state\n"); 3549 } 3550 3551 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 3552 3553 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 3554 struct device_attribute *attr, 3555 char *buf) 3556 { 3557 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3558 3559 return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn); 3560 } 3561 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 3562 3563 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, 3564 struct device_attribute *attr, 3565 char *buf) 3566 { 3567 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3568 3569 return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn); 3570 } 3571 static 
DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); 3572 3573 static ssize_t nvme_sysfs_show_hostid(struct device *dev, 3574 struct device_attribute *attr, 3575 char *buf) 3576 { 3577 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3578 3579 return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id); 3580 } 3581 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); 3582 3583 static ssize_t nvme_sysfs_show_address(struct device *dev, 3584 struct device_attribute *attr, 3585 char *buf) 3586 { 3587 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3588 3589 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 3590 } 3591 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 3592 3593 static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev, 3594 struct device_attribute *attr, char *buf) 3595 { 3596 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3597 struct nvmf_ctrl_options *opts = ctrl->opts; 3598 3599 if (ctrl->opts->max_reconnects == -1) 3600 return sysfs_emit(buf, "off\n"); 3601 return sysfs_emit(buf, "%d\n", 3602 opts->max_reconnects * opts->reconnect_delay); 3603 } 3604 3605 static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev, 3606 struct device_attribute *attr, const char *buf, size_t count) 3607 { 3608 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3609 struct nvmf_ctrl_options *opts = ctrl->opts; 3610 int ctrl_loss_tmo, err; 3611 3612 err = kstrtoint(buf, 10, &ctrl_loss_tmo); 3613 if (err) 3614 return -EINVAL; 3615 3616 if (ctrl_loss_tmo < 0) 3617 opts->max_reconnects = -1; 3618 else 3619 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, 3620 opts->reconnect_delay); 3621 return count; 3622 } 3623 static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR, 3624 nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store); 3625 3626 static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev, 3627 struct device_attribute *attr, char *buf) 3628 { 3629 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3630 3631 if (ctrl->opts->reconnect_delay == -1) 3632 return sysfs_emit(buf, "off\n"); 3633 return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay); 3634 } 3635 3636 static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev, 3637 struct device_attribute *attr, const char *buf, size_t count) 3638 { 3639 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3640 unsigned int v; 3641 int err; 3642 3643 err = kstrtou32(buf, 10, &v); 3644 if (err) 3645 return err; 3646 3647 ctrl->opts->reconnect_delay = v; 3648 return count; 3649 } 3650 static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, 3651 nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store); 3652 3653 static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev, 3654 struct device_attribute *attr, char *buf) 3655 { 3656 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3657 3658 if (ctrl->opts->fast_io_fail_tmo == -1) 3659 return sysfs_emit(buf, "off\n"); 3660 return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo); 3661 } 3662 3663 static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev, 3664 struct device_attribute *attr, const char *buf, size_t count) 3665 { 3666 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3667 struct nvmf_ctrl_options *opts = ctrl->opts; 3668 int fast_io_fail_tmo, err; 3669 3670 err = kstrtoint(buf, 10, &fast_io_fail_tmo); 3671 if (err) 3672 return -EINVAL; 3673 3674 if (fast_io_fail_tmo < 0) 3675 opts->fast_io_fail_tmo = -1; 3676 else 3677 opts->fast_io_fail_tmo = fast_io_fail_tmo; 3678 return count; 3679 } 3680 static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO 
| S_IWUSR, 3681 nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); 3682 3683 static ssize_t cntrltype_show(struct device *dev, 3684 struct device_attribute *attr, char *buf) 3685 { 3686 static const char * const type[] = { 3687 [NVME_CTRL_IO] = "io\n", 3688 [NVME_CTRL_DISC] = "discovery\n", 3689 [NVME_CTRL_ADMIN] = "admin\n", 3690 }; 3691 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3692 3693 if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype]) 3694 return sysfs_emit(buf, "reserved\n"); 3695 3696 return sysfs_emit(buf, type[ctrl->cntrltype]); 3697 } 3698 static DEVICE_ATTR_RO(cntrltype); 3699 3700 static ssize_t dctype_show(struct device *dev, 3701 struct device_attribute *attr, char *buf) 3702 { 3703 static const char * const type[] = { 3704 [NVME_DCTYPE_NOT_REPORTED] = "none\n", 3705 [NVME_DCTYPE_DDC] = "ddc\n", 3706 [NVME_DCTYPE_CDC] = "cdc\n", 3707 }; 3708 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3709 3710 if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype]) 3711 return sysfs_emit(buf, "reserved\n"); 3712 3713 return sysfs_emit(buf, type[ctrl->dctype]); 3714 } 3715 static DEVICE_ATTR_RO(dctype); 3716 3717 #ifdef CONFIG_NVME_AUTH 3718 static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev, 3719 struct device_attribute *attr, char *buf) 3720 { 3721 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3722 struct nvmf_ctrl_options *opts = ctrl->opts; 3723 3724 if (!opts->dhchap_secret) 3725 return sysfs_emit(buf, "none\n"); 3726 return sysfs_emit(buf, "%s\n", opts->dhchap_secret); 3727 } 3728 3729 static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev, 3730 struct device_attribute *attr, const char *buf, size_t count) 3731 { 3732 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3733 struct nvmf_ctrl_options *opts = ctrl->opts; 3734 char *dhchap_secret; 3735 3736 if (!ctrl->opts->dhchap_secret) 3737 return -EINVAL; 3738 if (count < 7) 3739 return -EINVAL; 3740 if (memcmp(buf, "DHHC-1:", 7)) 3741 return -EINVAL; 3742 3743 dhchap_secret = kzalloc(count + 1, GFP_KERNEL); 3744 if (!dhchap_secret) 3745 return -ENOMEM; 3746 memcpy(dhchap_secret, buf, count); 3747 nvme_auth_stop(ctrl); 3748 if (strcmp(dhchap_secret, opts->dhchap_secret)) { 3749 int ret; 3750 3751 ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key); 3752 if (ret) 3753 return ret; 3754 kfree(opts->dhchap_secret); 3755 opts->dhchap_secret = dhchap_secret; 3756 /* Key has changed; re-authentication with new key */ 3757 nvme_auth_reset(ctrl); 3758 } 3759 /* Start re-authentication */ 3760 dev_info(ctrl->device, "re-authenticating controller\n"); 3761 queue_work(nvme_wq, &ctrl->dhchap_auth_work); 3762 3763 return count; 3764 } 3765 static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR, 3766 nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store); 3767 3768 static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev, 3769 struct device_attribute *attr, char *buf) 3770 { 3771 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3772 struct nvmf_ctrl_options *opts = ctrl->opts; 3773 3774 if (!opts->dhchap_ctrl_secret) 3775 return sysfs_emit(buf, "none\n"); 3776 return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret); 3777 } 3778 3779 static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev, 3780 struct device_attribute *attr, const char *buf, size_t count) 3781 { 3782 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3783 struct nvmf_ctrl_options *opts = ctrl->opts; 3784 char *dhchap_secret; 3785 3786 if (!ctrl->opts->dhchap_ctrl_secret) 3787 return -EINVAL; 3788 
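/* reject anything shorter than or not starting with the "DHHC-1:" prefix */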
if (count < 7) 3789 return -EINVAL; 3790 if (memcmp(buf, "DHHC-1:", 7)) 3791 return -EINVAL; 3792 3793 dhchap_secret = kzalloc(count + 1, GFP_KERNEL); 3794 if (!dhchap_secret) 3795 return -ENOMEM; 3796 memcpy(dhchap_secret, buf, count); 3797 nvme_auth_stop(ctrl); 3798 if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) { 3799 int ret; 3800 3801 ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key); 3802 if (ret) 3803 return ret; 3804 kfree(opts->dhchap_ctrl_secret); 3805 opts->dhchap_ctrl_secret = dhchap_secret; 3806 /* Key has changed; re-authentication with new key */ 3807 nvme_auth_reset(ctrl); 3808 } 3809 /* Start re-authentication */ 3810 dev_info(ctrl->device, "re-authenticating controller\n"); 3811 queue_work(nvme_wq, &ctrl->dhchap_auth_work); 3812 3813 return count; 3814 } 3815 static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, 3816 nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); 3817 #endif 3818 3819 static struct attribute *nvme_dev_attrs[] = { 3820 &dev_attr_reset_controller.attr, 3821 &dev_attr_rescan_controller.attr, 3822 &dev_attr_model.attr, 3823 &dev_attr_serial.attr, 3824 &dev_attr_firmware_rev.attr, 3825 &dev_attr_cntlid.attr, 3826 &dev_attr_delete_controller.attr, 3827 &dev_attr_transport.attr, 3828 &dev_attr_subsysnqn.attr, 3829 &dev_attr_address.attr, 3830 &dev_attr_state.attr, 3831 &dev_attr_numa_node.attr, 3832 &dev_attr_queue_count.attr, 3833 &dev_attr_sqsize.attr, 3834 &dev_attr_hostnqn.attr, 3835 &dev_attr_hostid.attr, 3836 &dev_attr_ctrl_loss_tmo.attr, 3837 &dev_attr_reconnect_delay.attr, 3838 &dev_attr_fast_io_fail_tmo.attr, 3839 &dev_attr_kato.attr, 3840 &dev_attr_cntrltype.attr, 3841 &dev_attr_dctype.attr, 3842 #ifdef CONFIG_NVME_AUTH 3843 &dev_attr_dhchap_secret.attr, 3844 &dev_attr_dhchap_ctrl_secret.attr, 3845 #endif 3846 NULL 3847 }; 3848 3849 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 3850 struct attribute *a, int n) 3851 { 3852 struct device *dev = container_of(kobj, struct device, kobj); 3853 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3854 3855 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 3856 return 0; 3857 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 3858 return 0; 3859 if (a == &dev_attr_hostnqn.attr && !ctrl->opts) 3860 return 0; 3861 if (a == &dev_attr_hostid.attr && !ctrl->opts) 3862 return 0; 3863 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) 3864 return 0; 3865 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) 3866 return 0; 3867 if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) 3868 return 0; 3869 #ifdef CONFIG_NVME_AUTH 3870 if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts) 3871 return 0; 3872 if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts) 3873 return 0; 3874 #endif 3875 3876 return a->mode; 3877 } 3878 3879 static const struct attribute_group nvme_dev_attrs_group = { 3880 .attrs = nvme_dev_attrs, 3881 .is_visible = nvme_dev_attrs_are_visible, 3882 }; 3883 3884 static const struct attribute_group *nvme_dev_attr_groups[] = { 3885 &nvme_dev_attrs_group, 3886 NULL, 3887 }; 3888 3889 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, 3890 unsigned nsid) 3891 { 3892 struct nvme_ns_head *h; 3893 3894 lockdep_assert_held(&ctrl->subsys->lock); 3895 3896 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { 3897 /* 3898 * Private namespaces can share NSIDs under some conditions. 3899 * In that case we can't use the same ns_head for namespaces 3900 * with the same NSID. 
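 * nvme_is_unique_nsid() decides whether an NSID match is meaningful for this
 * controller/subsystem combination at all.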
3901 */ 3902 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) 3903 continue; 3904 if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) 3905 return h; 3906 } 3907 3908 return NULL; 3909 } 3910 3911 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, 3912 struct nvme_ns_ids *ids) 3913 { 3914 bool has_uuid = !uuid_is_null(&ids->uuid); 3915 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); 3916 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 3917 struct nvme_ns_head *h; 3918 3919 lockdep_assert_held(&subsys->lock); 3920 3921 list_for_each_entry(h, &subsys->nsheads, entry) { 3922 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) 3923 return -EINVAL; 3924 if (has_nguid && 3925 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) 3926 return -EINVAL; 3927 if (has_eui64 && 3928 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) 3929 return -EINVAL; 3930 } 3931 3932 return 0; 3933 } 3934 3935 static void nvme_cdev_rel(struct device *dev) 3936 { 3937 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); 3938 } 3939 3940 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) 3941 { 3942 cdev_device_del(cdev, cdev_device); 3943 put_device(cdev_device); 3944 } 3945 3946 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, 3947 const struct file_operations *fops, struct module *owner) 3948 { 3949 int minor, ret; 3950 3951 minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL); 3952 if (minor < 0) 3953 return minor; 3954 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); 3955 cdev_device->class = nvme_ns_chr_class; 3956 cdev_device->release = nvme_cdev_rel; 3957 device_initialize(cdev_device); 3958 cdev_init(cdev, fops); 3959 cdev->owner = owner; 3960 ret = cdev_device_add(cdev, cdev_device); 3961 if (ret) 3962 put_device(cdev_device); 3963 3964 return ret; 3965 } 3966 3967 static int nvme_ns_chr_open(struct inode *inode, struct file *file) 3968 { 3969 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3970 } 3971 3972 static int nvme_ns_chr_release(struct inode *inode, struct file *file) 3973 { 3974 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3975 return 0; 3976 } 3977 3978 static const struct file_operations nvme_ns_chr_fops = { 3979 .owner = THIS_MODULE, 3980 .open = nvme_ns_chr_open, 3981 .release = nvme_ns_chr_release, 3982 .unlocked_ioctl = nvme_ns_chr_ioctl, 3983 .compat_ioctl = compat_ptr_ioctl, 3984 .uring_cmd = nvme_ns_chr_uring_cmd, 3985 .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, 3986 }; 3987 3988 static int nvme_add_ns_cdev(struct nvme_ns *ns) 3989 { 3990 int ret; 3991 3992 ns->cdev_device.parent = ns->ctrl->device; 3993 ret = dev_set_name(&ns->cdev_device, "ng%dn%d", 3994 ns->ctrl->instance, ns->head->instance); 3995 if (ret) 3996 return ret; 3997 3998 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, 3999 ns->ctrl->ops->module); 4000 } 4001 4002 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 4003 struct nvme_ns_info *info) 4004 { 4005 struct nvme_ns_head *head; 4006 size_t size = sizeof(*head); 4007 int ret = -ENOMEM; 4008 4009 #ifdef CONFIG_NVME_MULTIPATH 4010 size += num_possible_nodes() * sizeof(struct nvme_ns *); 4011 #endif 4012 4013 head = kzalloc(size, GFP_KERNEL); 4014 if (!head) 4015 goto out; 4016 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL); 4017 if (ret < 0) 4018 goto out_free_head; 4019 head->instance = ret; 4020 INIT_LIST_HEAD(&head->list); 4021 ret = 
init_srcu_struct(&head->srcu); 4022 if (ret) 4023 goto out_ida_remove; 4024 head->subsys = ctrl->subsys; 4025 head->ns_id = info->nsid; 4026 head->ids = info->ids; 4027 head->shared = info->is_shared; 4028 kref_init(&head->ref); 4029 4030 if (head->ids.csi) { 4031 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); 4032 if (ret) 4033 goto out_cleanup_srcu; 4034 } else 4035 head->effects = ctrl->effects; 4036 4037 ret = nvme_mpath_alloc_disk(ctrl, head); 4038 if (ret) 4039 goto out_cleanup_srcu; 4040 4041 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 4042 4043 kref_get(&ctrl->subsys->ref); 4044 4045 return head; 4046 out_cleanup_srcu: 4047 cleanup_srcu_struct(&head->srcu); 4048 out_ida_remove: 4049 ida_free(&ctrl->subsys->ns_ida, head->instance); 4050 out_free_head: 4051 kfree(head); 4052 out: 4053 if (ret > 0) 4054 ret = blk_status_to_errno(nvme_error_status(ret)); 4055 return ERR_PTR(ret); 4056 } 4057 4058 static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this, 4059 struct nvme_ns_ids *ids) 4060 { 4061 struct nvme_subsystem *s; 4062 int ret = 0; 4063 4064 /* 4065 * Note that this check is racy as we try to avoid holding the global 4066 * lock over the whole ns_head creation. But it is only intended as 4067 * a sanity check anyway. 4068 */ 4069 mutex_lock(&nvme_subsystems_lock); 4070 list_for_each_entry(s, &nvme_subsystems, entry) { 4071 if (s == this) 4072 continue; 4073 mutex_lock(&s->lock); 4074 ret = nvme_subsys_check_duplicate_ids(s, ids); 4075 mutex_unlock(&s->lock); 4076 if (ret) 4077 break; 4078 } 4079 mutex_unlock(&nvme_subsystems_lock); 4080 4081 return ret; 4082 } 4083 4084 static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) 4085 { 4086 struct nvme_ctrl *ctrl = ns->ctrl; 4087 struct nvme_ns_head *head = NULL; 4088 int ret; 4089 4090 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids); 4091 if (ret) { 4092 dev_err(ctrl->device, 4093 "globally duplicate IDs for nsid %d\n", info->nsid); 4094 nvme_print_device_info(ctrl); 4095 return ret; 4096 } 4097 4098 mutex_lock(&ctrl->subsys->lock); 4099 head = nvme_find_ns_head(ctrl, info->nsid); 4100 if (!head) { 4101 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids); 4102 if (ret) { 4103 dev_err(ctrl->device, 4104 "duplicate IDs in subsystem for nsid %d\n", 4105 info->nsid); 4106 goto out_unlock; 4107 } 4108 head = nvme_alloc_ns_head(ctrl, info); 4109 if (IS_ERR(head)) { 4110 ret = PTR_ERR(head); 4111 goto out_unlock; 4112 } 4113 } else { 4114 ret = -EINVAL; 4115 if (!info->is_shared || !head->shared) { 4116 dev_err(ctrl->device, 4117 "Duplicate unshared namespace %d\n", 4118 info->nsid); 4119 goto out_put_ns_head; 4120 } 4121 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) { 4122 dev_err(ctrl->device, 4123 "IDs don't match for shared namespace %d\n", 4124 info->nsid); 4125 goto out_put_ns_head; 4126 } 4127 4128 if (!multipath && !list_empty(&head->list)) { 4129 dev_warn(ctrl->device, 4130 "Found shared namespace %d, but multipathing not supported.\n", 4131 info->nsid); 4132 dev_warn_once(ctrl->device, 4133 "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n."); 4134 } 4135 } 4136 4137 list_add_tail_rcu(&ns->siblings, &head->list); 4138 ns->head = head; 4139 mutex_unlock(&ctrl->subsys->lock); 4140 return 0; 4141 4142 out_put_ns_head: 4143 nvme_put_ns_head(head); 4144 out_unlock: 4145 mutex_unlock(&ctrl->subsys->lock); 4146 return ret; 4147 } 4148 4149 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl 
*ctrl, unsigned nsid) 4150 { 4151 struct nvme_ns *ns, *ret = NULL; 4152 4153 down_read(&ctrl->namespaces_rwsem); 4154 list_for_each_entry(ns, &ctrl->namespaces, list) { 4155 if (ns->head->ns_id == nsid) { 4156 if (!nvme_get_ns(ns)) 4157 continue; 4158 ret = ns; 4159 break; 4160 } 4161 if (ns->head->ns_id > nsid) 4162 break; 4163 } 4164 up_read(&ctrl->namespaces_rwsem); 4165 return ret; 4166 } 4167 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); 4168 4169 /* 4170 * Add the namespace to the controller list while keeping the list ordered. 4171 */ 4172 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) 4173 { 4174 struct nvme_ns *tmp; 4175 4176 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { 4177 if (tmp->head->ns_id < ns->head->ns_id) { 4178 list_add(&ns->list, &tmp->list); 4179 return; 4180 } 4181 } 4182 list_add(&ns->list, &ns->ctrl->namespaces); 4183 } 4184 4185 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) 4186 { 4187 struct nvme_ns *ns; 4188 struct gendisk *disk; 4189 int node = ctrl->numa_node; 4190 4191 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 4192 if (!ns) 4193 return; 4194 4195 disk = blk_mq_alloc_disk(ctrl->tagset, ns); 4196 if (IS_ERR(disk)) 4197 goto out_free_ns; 4198 disk->fops = &nvme_bdev_ops; 4199 disk->private_data = ns; 4200 4201 ns->disk = disk; 4202 ns->queue = disk->queue; 4203 4204 if (ctrl->opts && ctrl->opts->data_digest) 4205 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); 4206 4207 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 4208 if (ctrl->ops->supports_pci_p2pdma && 4209 ctrl->ops->supports_pci_p2pdma(ctrl)) 4210 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 4211 4212 ns->ctrl = ctrl; 4213 kref_init(&ns->kref); 4214 4215 if (nvme_init_ns_head(ns, info)) 4216 goto out_cleanup_disk; 4217 4218 /* 4219 * If multipathing is enabled, the device name for all disks and not 4220 * just those that represent shared namespaces needs to be based on the 4221 * subsystem instance. Using the controller instance for private 4222 * namespaces could lead to naming collisions between shared and private 4223 * namespaces if they don't use a common numbering scheme. 4224 * 4225 * If multipathing is not enabled, disk names must use the controller 4226 * instance as shared namespaces will show up as multiple block 4227 * devices. 
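 *
 * For example, with CONFIG_NVME_MULTIPATH a shared namespace is exposed as a
 * single node such as nvme0n1, backed by hidden per-path disks named
 * nvme0c0n1, nvme0c1n1, and so on; without it, each path shows up as its own
 * visible nvme<ctrl>n<ns> block device.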
4228 */ 4229 if (ns->head->disk) { 4230 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, 4231 ctrl->instance, ns->head->instance); 4232 disk->flags |= GENHD_FL_HIDDEN; 4233 } else if (multipath) { 4234 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, 4235 ns->head->instance); 4236 } else { 4237 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, 4238 ns->head->instance); 4239 } 4240 4241 if (nvme_update_ns_info(ns, info)) 4242 goto out_unlink_ns; 4243 4244 down_write(&ctrl->namespaces_rwsem); 4245 nvme_ns_add_to_ctrl_list(ns); 4246 up_write(&ctrl->namespaces_rwsem); 4247 nvme_get_ctrl(ctrl); 4248 4249 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups)) 4250 goto out_cleanup_ns_from_list; 4251 4252 if (!nvme_ns_head_multipath(ns->head)) 4253 nvme_add_ns_cdev(ns); 4254 4255 nvme_mpath_add_disk(ns, info->anagrpid); 4256 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 4257 4258 return; 4259 4260 out_cleanup_ns_from_list: 4261 nvme_put_ctrl(ctrl); 4262 down_write(&ctrl->namespaces_rwsem); 4263 list_del_init(&ns->list); 4264 up_write(&ctrl->namespaces_rwsem); 4265 out_unlink_ns: 4266 mutex_lock(&ctrl->subsys->lock); 4267 list_del_rcu(&ns->siblings); 4268 if (list_empty(&ns->head->list)) 4269 list_del_init(&ns->head->entry); 4270 mutex_unlock(&ctrl->subsys->lock); 4271 nvme_put_ns_head(ns->head); 4272 out_cleanup_disk: 4273 put_disk(disk); 4274 out_free_ns: 4275 kfree(ns); 4276 } 4277 4278 static void nvme_ns_remove(struct nvme_ns *ns) 4279 { 4280 bool last_path = false; 4281 4282 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 4283 return; 4284 4285 clear_bit(NVME_NS_READY, &ns->flags); 4286 set_capacity(ns->disk, 0); 4287 nvme_fault_inject_fini(&ns->fault_inject); 4288 4289 /* 4290 * Ensure that !NVME_NS_READY is seen by other threads to prevent 4291 * this ns going back into current_path. 
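 * The SRCU grace periods below guarantee that any submitter that already
 * sampled this path under srcu_read_lock() has finished before teardown
 * continues.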
4292 */ 4293 synchronize_srcu(&ns->head->srcu); 4294 4295 /* wait for concurrent submissions */ 4296 if (nvme_mpath_clear_current_path(ns)) 4297 synchronize_srcu(&ns->head->srcu); 4298 4299 mutex_lock(&ns->ctrl->subsys->lock); 4300 list_del_rcu(&ns->siblings); 4301 if (list_empty(&ns->head->list)) { 4302 list_del_init(&ns->head->entry); 4303 last_path = true; 4304 } 4305 mutex_unlock(&ns->ctrl->subsys->lock); 4306 4307 /* guarantee not available in head->list */ 4308 synchronize_rcu(); 4309 4310 if (!nvme_ns_head_multipath(ns->head)) 4311 nvme_cdev_del(&ns->cdev, &ns->cdev_device); 4312 del_gendisk(ns->disk); 4313 4314 down_write(&ns->ctrl->namespaces_rwsem); 4315 list_del_init(&ns->list); 4316 up_write(&ns->ctrl->namespaces_rwsem); 4317 4318 if (last_path) 4319 nvme_mpath_shutdown_disk(ns->head); 4320 nvme_put_ns(ns); 4321 } 4322 4323 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) 4324 { 4325 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); 4326 4327 if (ns) { 4328 nvme_ns_remove(ns); 4329 nvme_put_ns(ns); 4330 } 4331 } 4332 4333 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) 4334 { 4335 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 4336 4337 if (test_bit(NVME_NS_DEAD, &ns->flags)) 4338 goto out; 4339 4340 ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 4341 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { 4342 dev_err(ns->ctrl->device, 4343 "identifiers changed for nsid %d\n", ns->head->ns_id); 4344 goto out; 4345 } 4346 4347 ret = nvme_update_ns_info(ns, info); 4348 out: 4349 /* 4350 * Only remove the namespace if we got a fatal error back from the 4351 * device, otherwise ignore the error and just move on. 4352 * 4353 * TODO: we should probably schedule a delayed retry here. 4354 */ 4355 if (ret > 0 && (ret & NVME_SC_DNR)) 4356 nvme_ns_remove(ns); 4357 } 4358 4359 static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) 4360 { 4361 struct nvme_ns_info info = { .nsid = nsid }; 4362 struct nvme_ns *ns; 4363 4364 if (nvme_identify_ns_descs(ctrl, &info)) 4365 return; 4366 4367 if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) { 4368 dev_warn(ctrl->device, 4369 "command set not reported for nsid: %d\n", nsid); 4370 return; 4371 } 4372 4373 /* 4374 * If available, try to use the Command Set Independent Identify Namespace 4375 * data structure to find all the generic information that is needed to 4376 * set up a namespace. If not, fall back to the legacy version. 4377 */ 4378 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || 4379 (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) { 4380 if (nvme_ns_info_from_id_cs_indep(ctrl, &info)) 4381 return; 4382 } else { 4383 if (nvme_ns_info_from_identify(ctrl, &info)) 4384 return; 4385 } 4386 4387 /* 4388 * Ignore the namespace if it is not ready. We will get an AEN once it 4389 * becomes ready and restart the scan.
4390 */ 4391 if (!info.is_ready) 4392 return; 4393 4394 ns = nvme_find_get_ns(ctrl, nsid); 4395 if (ns) { 4396 nvme_validate_ns(ns, &info); 4397 nvme_put_ns(ns); 4398 } else { 4399 nvme_alloc_ns(ctrl, &info); 4400 } 4401 } 4402 4403 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 4404 unsigned nsid) 4405 { 4406 struct nvme_ns *ns, *next; 4407 LIST_HEAD(rm_list); 4408 4409 down_write(&ctrl->namespaces_rwsem); 4410 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 4411 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) 4412 list_move_tail(&ns->list, &rm_list); 4413 } 4414 up_write(&ctrl->namespaces_rwsem); 4415 4416 list_for_each_entry_safe(ns, next, &rm_list, list) 4417 nvme_ns_remove(ns); 4418 4419 } 4420 4421 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) 4422 { 4423 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); 4424 __le32 *ns_list; 4425 u32 prev = 0; 4426 int ret = 0, i; 4427 4428 if (nvme_ctrl_limited_cns(ctrl)) 4429 return -EOPNOTSUPP; 4430 4431 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 4432 if (!ns_list) 4433 return -ENOMEM; 4434 4435 for (;;) { 4436 struct nvme_command cmd = { 4437 .identify.opcode = nvme_admin_identify, 4438 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, 4439 .identify.nsid = cpu_to_le32(prev), 4440 }; 4441 4442 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, 4443 NVME_IDENTIFY_DATA_SIZE); 4444 if (ret) { 4445 dev_warn(ctrl->device, 4446 "Identify NS List failed (status=0x%x)\n", ret); 4447 goto free; 4448 } 4449 4450 for (i = 0; i < nr_entries; i++) { 4451 u32 nsid = le32_to_cpu(ns_list[i]); 4452 4453 if (!nsid) /* end of the list? */ 4454 goto out; 4455 nvme_scan_ns(ctrl, nsid); 4456 while (++prev < nsid) 4457 nvme_ns_remove_by_nsid(ctrl, prev); 4458 } 4459 } 4460 out: 4461 nvme_remove_invalid_namespaces(ctrl, prev); 4462 free: 4463 kfree(ns_list); 4464 return ret; 4465 } 4466 4467 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) 4468 { 4469 struct nvme_id_ctrl *id; 4470 u32 nn, i; 4471 4472 if (nvme_identify_ctrl(ctrl, &id)) 4473 return; 4474 nn = le32_to_cpu(id->nn); 4475 kfree(id); 4476 4477 for (i = 1; i <= nn; i++) 4478 nvme_scan_ns(ctrl, i); 4479 4480 nvme_remove_invalid_namespaces(ctrl, nn); 4481 } 4482 4483 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 4484 { 4485 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 4486 __le32 *log; 4487 int error; 4488 4489 log = kzalloc(log_size, GFP_KERNEL); 4490 if (!log) 4491 return; 4492 4493 /* 4494 * We need to read the log to clear the AEN, but we don't want to rely 4495 * on it for the changed namespace information as userspace could have 4496 * raced with us in reading the log page, which could cause us to miss 4497 * updates. 4498 */ 4499 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, 4500 NVME_CSI_NVM, log, log_size, 0); 4501 if (error) 4502 dev_warn(ctrl->device, 4503 "reading changed ns log failed: %d\n", error); 4504 4505 kfree(log); 4506 } 4507 4508 static void nvme_scan_work(struct work_struct *work) 4509 { 4510 struct nvme_ctrl *ctrl = 4511 container_of(work, struct nvme_ctrl, scan_work); 4512 int ret; 4513 4514 /* No tagset on a live ctrl means IO queues could not be created */ 4515 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) 4516 return; 4517 4518 /* 4519 * Identify controller limits can change at controller reset due to a 4520 * new firmware download; even though it is not common, we cannot ignore 4521 * such a scenario.
Controller's non-mdts limits are reported in the unit 4522 * of logical blocks that is dependent on the format of attached 4523 * namespace. Hence re-read the limits at the time of ns allocation. 4524 */ 4525 ret = nvme_init_non_mdts_limits(ctrl); 4526 if (ret < 0) { 4527 dev_warn(ctrl->device, 4528 "reading non-mdts-limits failed: %d\n", ret); 4529 return; 4530 } 4531 4532 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 4533 dev_info(ctrl->device, "rescanning namespaces.\n"); 4534 nvme_clear_changed_ns_log(ctrl); 4535 } 4536 4537 mutex_lock(&ctrl->scan_lock); 4538 if (nvme_scan_ns_list(ctrl) != 0) 4539 nvme_scan_ns_sequential(ctrl); 4540 mutex_unlock(&ctrl->scan_lock); 4541 } 4542 4543 /* 4544 * This function iterates the namespace list unlocked to allow recovery from 4545 * controller failure. It is up to the caller to ensure the namespace list is 4546 * not modified by scan work while this function is executing. 4547 */ 4548 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 4549 { 4550 struct nvme_ns *ns, *next; 4551 LIST_HEAD(ns_list); 4552 4553 /* 4554 * make sure to requeue I/O to all namespaces as these 4555 * might result from the scan itself and must complete 4556 * for the scan_work to make progress 4557 */ 4558 nvme_mpath_clear_ctrl_paths(ctrl); 4559 4560 /* prevent racing with ns scanning */ 4561 flush_work(&ctrl->scan_work); 4562 4563 /* 4564 * The dead states indicates the controller was not gracefully 4565 * disconnected. In that case, we won't be able to flush any data while 4566 * removing the namespaces' disks; fail all the queues now to avoid 4567 * potentially having to clean up the failed sync later. 4568 */ 4569 if (ctrl->state == NVME_CTRL_DEAD) 4570 nvme_kill_queues(ctrl); 4571 4572 /* this is a no-op when called from the controller reset handler */ 4573 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); 4574 4575 down_write(&ctrl->namespaces_rwsem); 4576 list_splice_init(&ctrl->namespaces, &ns_list); 4577 up_write(&ctrl->namespaces_rwsem); 4578 4579 list_for_each_entry_safe(ns, next, &ns_list, list) 4580 nvme_ns_remove(ns); 4581 } 4582 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 4583 4584 static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env) 4585 { 4586 struct nvme_ctrl *ctrl = 4587 container_of(dev, struct nvme_ctrl, ctrl_device); 4588 struct nvmf_ctrl_options *opts = ctrl->opts; 4589 int ret; 4590 4591 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); 4592 if (ret) 4593 return ret; 4594 4595 if (opts) { 4596 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); 4597 if (ret) 4598 return ret; 4599 4600 ret = add_uevent_var(env, "NVME_TRSVCID=%s", 4601 opts->trsvcid ?: "none"); 4602 if (ret) 4603 return ret; 4604 4605 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", 4606 opts->host_traddr ?: "none"); 4607 if (ret) 4608 return ret; 4609 4610 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", 4611 opts->host_iface ?: "none"); 4612 } 4613 return ret; 4614 } 4615 4616 static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata) 4617 { 4618 char *envp[2] = { envdata, NULL }; 4619 4620 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4621 } 4622 4623 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 4624 { 4625 char *envp[2] = { NULL, NULL }; 4626 u32 aen_result = ctrl->aen_result; 4627 4628 ctrl->aen_result = 0; 4629 if (!aen_result) 4630 return; 4631 4632 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 4633 if (!envp[0]) 4634 return; 4635 
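	/*
	 * Forward the raw AEN result to userspace as a change uevent so that
	 * udev rules and management tools can react to asynchronous events
	 * without polling the log pages.
	 */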
kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4636 kfree(envp[0]); 4637 } 4638 4639 static void nvme_async_event_work(struct work_struct *work) 4640 { 4641 struct nvme_ctrl *ctrl = 4642 container_of(work, struct nvme_ctrl, async_event_work); 4643 4644 nvme_aen_uevent(ctrl); 4645 4646 /* 4647 * The transport drivers must guarantee AER submission here is safe by 4648 * flushing ctrl async_event_work after changing the controller state 4649 * from LIVE and before freeing the admin queue. 4650 */ 4651 if (ctrl->state == NVME_CTRL_LIVE) 4652 ctrl->ops->submit_async_event(ctrl); 4653 } 4654 4655 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 4656 { 4657 4658 u32 csts; 4659 4660 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 4661 return false; 4662 4663 if (csts == ~0) 4664 return false; 4665 4666 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 4667 } 4668 4669 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 4670 { 4671 struct nvme_fw_slot_info_log *log; 4672 4673 log = kmalloc(sizeof(*log), GFP_KERNEL); 4674 if (!log) 4675 return; 4676 4677 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, 4678 log, sizeof(*log), 0)) 4679 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 4680 kfree(log); 4681 } 4682 4683 static void nvme_fw_act_work(struct work_struct *work) 4684 { 4685 struct nvme_ctrl *ctrl = container_of(work, 4686 struct nvme_ctrl, fw_act_work); 4687 unsigned long fw_act_timeout; 4688 4689 if (ctrl->mtfa) 4690 fw_act_timeout = jiffies + 4691 msecs_to_jiffies(ctrl->mtfa * 100); 4692 else 4693 fw_act_timeout = jiffies + 4694 msecs_to_jiffies(admin_timeout * 1000); 4695 4696 nvme_stop_queues(ctrl); 4697 while (nvme_ctrl_pp_status(ctrl)) { 4698 if (time_after(jiffies, fw_act_timeout)) { 4699 dev_warn(ctrl->device, 4700 "Fw activation timeout, reset controller\n"); 4701 nvme_try_sched_reset(ctrl); 4702 return; 4703 } 4704 msleep(100); 4705 } 4706 4707 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) 4708 return; 4709 4710 nvme_start_queues(ctrl); 4711 /* read FW slot information to clear the AER */ 4712 nvme_get_fw_slot_info(ctrl); 4713 4714 queue_work(nvme_wq, &ctrl->async_event_work); 4715 } 4716 4717 static u32 nvme_aer_type(u32 result) 4718 { 4719 return result & 0x7; 4720 } 4721 4722 static u32 nvme_aer_subtype(u32 result) 4723 { 4724 return (result & 0xff00) >> 8; 4725 } 4726 4727 static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 4728 { 4729 u32 aer_notice_type = nvme_aer_subtype(result); 4730 bool requeue = true; 4731 4732 trace_nvme_async_event(ctrl, aer_notice_type); 4733 4734 switch (aer_notice_type) { 4735 case NVME_AER_NOTICE_NS_CHANGED: 4736 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 4737 nvme_queue_scan(ctrl); 4738 break; 4739 case NVME_AER_NOTICE_FW_ACT_STARTING: 4740 /* 4741 * We are (ab)using the RESETTING state to prevent subsequent 4742 * recovery actions from interfering with the controller's 4743 * firmware activation. 
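 * nvme_fw_act_work() will move the state back to LIVE once the controller
 * leaves the paused state, or schedule a reset if activation times out.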
4744 */ 4745 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { 4746 nvme_auth_stop(ctrl); 4747 requeue = false; 4748 queue_work(nvme_wq, &ctrl->fw_act_work); 4749 } 4750 break; 4751 #ifdef CONFIG_NVME_MULTIPATH 4752 case NVME_AER_NOTICE_ANA: 4753 if (!ctrl->ana_log_buf) 4754 break; 4755 queue_work(nvme_wq, &ctrl->ana_work); 4756 break; 4757 #endif 4758 case NVME_AER_NOTICE_DISC_CHANGED: 4759 ctrl->aen_result = result; 4760 break; 4761 default: 4762 dev_warn(ctrl->device, "async event result %08x\n", result); 4763 } 4764 return requeue; 4765 } 4766 4767 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) 4768 { 4769 trace_nvme_async_event(ctrl, NVME_AER_ERROR); 4770 dev_warn(ctrl->device, "resetting controller due to AER\n"); 4771 nvme_reset_ctrl(ctrl); 4772 } 4773 4774 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 4775 volatile union nvme_result *res) 4776 { 4777 u32 result = le32_to_cpu(res->u32); 4778 u32 aer_type = nvme_aer_type(result); 4779 u32 aer_subtype = nvme_aer_subtype(result); 4780 bool requeue = true; 4781 4782 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 4783 return; 4784 4785 switch (aer_type) { 4786 case NVME_AER_NOTICE: 4787 requeue = nvme_handle_aen_notice(ctrl, result); 4788 break; 4789 case NVME_AER_ERROR: 4790 /* 4791 * For a persistent internal error, don't run async_event_work 4792 * to submit a new AER. The controller reset will do it. 4793 */ 4794 if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { 4795 nvme_handle_aer_persistent_error(ctrl); 4796 return; 4797 } 4798 fallthrough; 4799 case NVME_AER_SMART: 4800 case NVME_AER_CSS: 4801 case NVME_AER_VS: 4802 trace_nvme_async_event(ctrl, aer_type); 4803 ctrl->aen_result = result; 4804 break; 4805 default: 4806 break; 4807 } 4808 4809 if (requeue) 4810 queue_work(nvme_wq, &ctrl->async_event_work); 4811 } 4812 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 4813 4814 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4815 const struct blk_mq_ops *ops, unsigned int flags, 4816 unsigned int cmd_size) 4817 { 4818 int ret; 4819 4820 memset(set, 0, sizeof(*set)); 4821 set->ops = ops; 4822 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 4823 if (ctrl->ops->flags & NVME_F_FABRICS) 4824 set->reserved_tags = NVMF_RESERVED_TAGS; 4825 set->numa_node = ctrl->numa_node; 4826 set->flags = flags; 4827 set->cmd_size = cmd_size; 4828 set->driver_data = ctrl; 4829 set->nr_hw_queues = 1; 4830 set->timeout = NVME_ADMIN_TIMEOUT; 4831 ret = blk_mq_alloc_tag_set(set); 4832 if (ret) 4833 return ret; 4834 4835 ctrl->admin_q = blk_mq_init_queue(set); 4836 if (IS_ERR(ctrl->admin_q)) { 4837 ret = PTR_ERR(ctrl->admin_q); 4838 goto out_free_tagset; 4839 } 4840 4841 if (ctrl->ops->flags & NVME_F_FABRICS) { 4842 ctrl->fabrics_q = blk_mq_init_queue(set); 4843 if (IS_ERR(ctrl->fabrics_q)) { 4844 ret = PTR_ERR(ctrl->fabrics_q); 4845 goto out_cleanup_admin_q; 4846 } 4847 } 4848 4849 ctrl->admin_tagset = set; 4850 return 0; 4851 4852 out_cleanup_admin_q: 4853 blk_mq_destroy_queue(ctrl->admin_q); 4854 out_free_tagset: 4855 blk_mq_free_tag_set(ctrl->admin_tagset); 4856 return ret; 4857 } 4858 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); 4859 4860 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) 4861 { 4862 blk_mq_destroy_queue(ctrl->admin_q); 4863 if (ctrl->ops->flags & NVME_F_FABRICS) 4864 blk_mq_destroy_queue(ctrl->fabrics_q); 4865 blk_mq_free_tag_set(ctrl->admin_tagset); 4866 } 4867 EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set); 4868 4869 int nvme_alloc_io_tag_set(struct 
nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4870 const struct blk_mq_ops *ops, unsigned int flags, 4871 unsigned int cmd_size) 4872 { 4873 int ret; 4874 4875 memset(set, 0, sizeof(*set)); 4876 set->ops = ops; 4877 set->queue_depth = ctrl->sqsize + 1; 4878 set->reserved_tags = NVMF_RESERVED_TAGS; 4879 set->numa_node = ctrl->numa_node; 4880 set->flags = flags; 4881 set->cmd_size = cmd_size, 4882 set->driver_data = ctrl; 4883 set->nr_hw_queues = ctrl->queue_count - 1; 4884 set->timeout = NVME_IO_TIMEOUT; 4885 if (ops->map_queues) 4886 set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; 4887 ret = blk_mq_alloc_tag_set(set); 4888 if (ret) 4889 return ret; 4890 4891 if (ctrl->ops->flags & NVME_F_FABRICS) { 4892 ctrl->connect_q = blk_mq_init_queue(set); 4893 if (IS_ERR(ctrl->connect_q)) { 4894 ret = PTR_ERR(ctrl->connect_q); 4895 goto out_free_tag_set; 4896 } 4897 } 4898 4899 ctrl->tagset = set; 4900 return 0; 4901 4902 out_free_tag_set: 4903 blk_mq_free_tag_set(set); 4904 return ret; 4905 } 4906 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set); 4907 4908 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl) 4909 { 4910 if (ctrl->ops->flags & NVME_F_FABRICS) 4911 blk_mq_destroy_queue(ctrl->connect_q); 4912 blk_mq_free_tag_set(ctrl->tagset); 4913 } 4914 EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set); 4915 4916 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 4917 { 4918 nvme_mpath_stop(ctrl); 4919 nvme_auth_stop(ctrl); 4920 nvme_stop_keep_alive(ctrl); 4921 nvme_stop_failfast_work(ctrl); 4922 flush_work(&ctrl->async_event_work); 4923 cancel_work_sync(&ctrl->fw_act_work); 4924 if (ctrl->ops->stop_ctrl) 4925 ctrl->ops->stop_ctrl(ctrl); 4926 } 4927 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 4928 4929 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 4930 { 4931 nvme_start_keep_alive(ctrl); 4932 4933 nvme_enable_aen(ctrl); 4934 4935 /* 4936 * persistent discovery controllers need to send indication to userspace 4937 * to re-read the discovery log page to learn about possible changes 4938 * that were missed. We identify persistent discovery controllers by 4939 * checking that they started once before, hence are reconnecting back. 
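 * test_and_set_bit() returns the previous value, so the rediscover uevent is
 * only sent from the second and later nvme_start_ctrl() invocations, i.e. on
 * reconnects.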
4940 */ 4941 if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && 4942 nvme_discovery_ctrl(ctrl)) 4943 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); 4944 4945 if (ctrl->queue_count > 1) { 4946 nvme_queue_scan(ctrl); 4947 nvme_start_queues(ctrl); 4948 nvme_mpath_update(ctrl); 4949 } 4950 4951 nvme_change_uevent(ctrl, "NVME_EVENT=connected"); 4952 } 4953 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 4954 4955 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4956 { 4957 nvme_hwmon_exit(ctrl); 4958 nvme_fault_inject_fini(&ctrl->fault_inject); 4959 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4960 cdev_device_del(&ctrl->cdev, ctrl->device); 4961 nvme_put_ctrl(ctrl); 4962 } 4963 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 4964 4965 static void nvme_free_cels(struct nvme_ctrl *ctrl) 4966 { 4967 struct nvme_effects_log *cel; 4968 unsigned long i; 4969 4970 xa_for_each(&ctrl->cels, i, cel) { 4971 xa_erase(&ctrl->cels, i); 4972 kfree(cel); 4973 } 4974 4975 xa_destroy(&ctrl->cels); 4976 } 4977 4978 static void nvme_free_ctrl(struct device *dev) 4979 { 4980 struct nvme_ctrl *ctrl = 4981 container_of(dev, struct nvme_ctrl, ctrl_device); 4982 struct nvme_subsystem *subsys = ctrl->subsys; 4983 4984 if (!subsys || ctrl->instance != subsys->instance) 4985 ida_free(&nvme_instance_ida, ctrl->instance); 4986 4987 nvme_free_cels(ctrl); 4988 nvme_mpath_uninit(ctrl); 4989 nvme_auth_stop(ctrl); 4990 nvme_auth_free(ctrl); 4991 __free_page(ctrl->discard_page); 4992 4993 if (subsys) { 4994 mutex_lock(&nvme_subsystems_lock); 4995 list_del(&ctrl->subsys_entry); 4996 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 4997 mutex_unlock(&nvme_subsystems_lock); 4998 } 4999 5000 ctrl->ops->free_ctrl(ctrl); 5001 5002 if (subsys) 5003 nvme_put_subsystem(subsys); 5004 } 5005 5006 /* 5007 * Initialize an NVMe controller structure. This needs to be called during 5008 * the earliest initialization so that we have the initialized structure 5009 * around during probing.
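 * It is paired with nvme_uninit_ctrl() when the controller is torn down
 * again.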
5010 */ 5011 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 5012 const struct nvme_ctrl_ops *ops, unsigned long quirks) 5013 { 5014 int ret; 5015 5016 ctrl->state = NVME_CTRL_NEW; 5017 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 5018 spin_lock_init(&ctrl->lock); 5019 mutex_init(&ctrl->scan_lock); 5020 INIT_LIST_HEAD(&ctrl->namespaces); 5021 xa_init(&ctrl->cels); 5022 init_rwsem(&ctrl->namespaces_rwsem); 5023 ctrl->dev = dev; 5024 ctrl->ops = ops; 5025 ctrl->quirks = quirks; 5026 ctrl->numa_node = NUMA_NO_NODE; 5027 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 5028 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 5029 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 5030 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 5031 init_waitqueue_head(&ctrl->state_wq); 5032 5033 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 5034 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); 5035 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 5036 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 5037 5038 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > 5039 PAGE_SIZE); 5040 ctrl->discard_page = alloc_page(GFP_KERNEL); 5041 if (!ctrl->discard_page) { 5042 ret = -ENOMEM; 5043 goto out; 5044 } 5045 5046 ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL); 5047 if (ret < 0) 5048 goto out; 5049 ctrl->instance = ret; 5050 5051 device_initialize(&ctrl->ctrl_device); 5052 ctrl->device = &ctrl->ctrl_device; 5053 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), 5054 ctrl->instance); 5055 ctrl->device->class = nvme_class; 5056 ctrl->device->parent = ctrl->dev; 5057 ctrl->device->groups = nvme_dev_attr_groups; 5058 ctrl->device->release = nvme_free_ctrl; 5059 dev_set_drvdata(ctrl->device, ctrl); 5060 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 5061 if (ret) 5062 goto out_release_instance; 5063 5064 nvme_get_ctrl(ctrl); 5065 cdev_init(&ctrl->cdev, &nvme_dev_fops); 5066 ctrl->cdev.owner = ops->module; 5067 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 5068 if (ret) 5069 goto out_free_name; 5070 5071 /* 5072 * Initialize latency tolerance controls. The sysfs files won't 5073 * be visible to userspace unless the device actually supports APST. 5074 */ 5075 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 5076 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 5077 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 5078 5079 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); 5080 nvme_mpath_init_ctrl(ctrl); 5081 nvme_auth_init_ctrl(ctrl); 5082 5083 return 0; 5084 out_free_name: 5085 nvme_put_ctrl(ctrl); 5086 kfree_const(ctrl->device->kobj.name); 5087 out_release_instance: 5088 ida_free(&nvme_instance_ida, ctrl->instance); 5089 out: 5090 if (ctrl->discard_page) 5091 __free_page(ctrl->discard_page); 5092 return ret; 5093 } 5094 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 5095 5096 static void nvme_start_ns_queue(struct nvme_ns *ns) 5097 { 5098 if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags)) 5099 blk_mq_unquiesce_queue(ns->queue); 5100 } 5101 5102 static void nvme_stop_ns_queue(struct nvme_ns *ns) 5103 { 5104 if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags)) 5105 blk_mq_quiesce_queue(ns->queue); 5106 else 5107 blk_mq_wait_quiesce_done(ns->queue); 5108 } 5109 5110 /* 5111 * Prepare a queue for teardown. 
5112 * 5113 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set 5114 * the capacity to 0 after that to avoid blocking dispatchers that may be 5115 * holding bd_mutex. This will end buffered writers dirtying pages that can't 5116 * be synced. 5117 */ 5118 static void nvme_set_queue_dying(struct nvme_ns *ns) 5119 { 5120 if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) 5121 return; 5122 5123 blk_mark_disk_dead(ns->disk); 5124 nvme_start_ns_queue(ns); 5125 5126 set_capacity_and_notify(ns->disk, 0); 5127 } 5128 5129 /** 5130 * nvme_kill_queues(): Ends all namespace queues 5131 * @ctrl: the dead controller that needs to end 5132 * 5133 * Call this function when the driver determines it is unable to get the 5134 * controller in a state capable of servicing IO. 5135 */ 5136 void nvme_kill_queues(struct nvme_ctrl *ctrl) 5137 { 5138 struct nvme_ns *ns; 5139 5140 down_read(&ctrl->namespaces_rwsem); 5141 5142 /* Forcibly unquiesce queues to avoid blocking dispatch */ 5143 if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q)) 5144 nvme_start_admin_queue(ctrl); 5145 5146 list_for_each_entry(ns, &ctrl->namespaces, list) 5147 nvme_set_queue_dying(ns); 5148 5149 up_read(&ctrl->namespaces_rwsem); 5150 } 5151 EXPORT_SYMBOL_GPL(nvme_kill_queues); 5152 5153 void nvme_unfreeze(struct nvme_ctrl *ctrl) 5154 { 5155 struct nvme_ns *ns; 5156 5157 down_read(&ctrl->namespaces_rwsem); 5158 list_for_each_entry(ns, &ctrl->namespaces, list) 5159 blk_mq_unfreeze_queue(ns->queue); 5160 up_read(&ctrl->namespaces_rwsem); 5161 } 5162 EXPORT_SYMBOL_GPL(nvme_unfreeze); 5163 5164 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 5165 { 5166 struct nvme_ns *ns; 5167 5168 down_read(&ctrl->namespaces_rwsem); 5169 list_for_each_entry(ns, &ctrl->namespaces, list) { 5170 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 5171 if (timeout <= 0) 5172 break; 5173 } 5174 up_read(&ctrl->namespaces_rwsem); 5175 return timeout; 5176 } 5177 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 5178 5179 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 5180 { 5181 struct nvme_ns *ns; 5182 5183 down_read(&ctrl->namespaces_rwsem); 5184 list_for_each_entry(ns, &ctrl->namespaces, list) 5185 blk_mq_freeze_queue_wait(ns->queue); 5186 up_read(&ctrl->namespaces_rwsem); 5187 } 5188 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 5189 5190 void nvme_start_freeze(struct nvme_ctrl *ctrl) 5191 { 5192 struct nvme_ns *ns; 5193 5194 down_read(&ctrl->namespaces_rwsem); 5195 list_for_each_entry(ns, &ctrl->namespaces, list) 5196 blk_freeze_queue_start(ns->queue); 5197 up_read(&ctrl->namespaces_rwsem); 5198 } 5199 EXPORT_SYMBOL_GPL(nvme_start_freeze); 5200 5201 void nvme_stop_queues(struct nvme_ctrl *ctrl) 5202 { 5203 struct nvme_ns *ns; 5204 5205 down_read(&ctrl->namespaces_rwsem); 5206 list_for_each_entry(ns, &ctrl->namespaces, list) 5207 nvme_stop_ns_queue(ns); 5208 up_read(&ctrl->namespaces_rwsem); 5209 } 5210 EXPORT_SYMBOL_GPL(nvme_stop_queues); 5211 5212 void nvme_start_queues(struct nvme_ctrl *ctrl) 5213 { 5214 struct nvme_ns *ns; 5215 5216 down_read(&ctrl->namespaces_rwsem); 5217 list_for_each_entry(ns, &ctrl->namespaces, list) 5218 nvme_start_ns_queue(ns); 5219 up_read(&ctrl->namespaces_rwsem); 5220 } 5221 EXPORT_SYMBOL_GPL(nvme_start_queues); 5222 5223 void nvme_stop_admin_queue(struct nvme_ctrl *ctrl) 5224 { 5225 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 5226 blk_mq_quiesce_queue(ctrl->admin_q); 5227 else 5228 blk_mq_wait_quiesce_done(ctrl->admin_q); 5229 } 5230
EXPORT_SYMBOL_GPL(nvme_stop_admin_queue); 5231 5232 void nvme_start_admin_queue(struct nvme_ctrl *ctrl) 5233 { 5234 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 5235 blk_mq_unquiesce_queue(ctrl->admin_q); 5236 } 5237 EXPORT_SYMBOL_GPL(nvme_start_admin_queue); 5238 5239 void nvme_sync_io_queues(struct nvme_ctrl *ctrl) 5240 { 5241 struct nvme_ns *ns; 5242 5243 down_read(&ctrl->namespaces_rwsem); 5244 list_for_each_entry(ns, &ctrl->namespaces, list) 5245 blk_sync_queue(ns->queue); 5246 up_read(&ctrl->namespaces_rwsem); 5247 } 5248 EXPORT_SYMBOL_GPL(nvme_sync_io_queues); 5249 5250 void nvme_sync_queues(struct nvme_ctrl *ctrl) 5251 { 5252 nvme_sync_io_queues(ctrl); 5253 if (ctrl->admin_q) 5254 blk_sync_queue(ctrl->admin_q); 5255 } 5256 EXPORT_SYMBOL_GPL(nvme_sync_queues); 5257 5258 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) 5259 { 5260 if (file->f_op != &nvme_dev_fops) 5261 return NULL; 5262 return file->private_data; 5263 } 5264 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); 5265 5266 /* 5267 * Check we didn't inadvertently grow the command structure sizes: 5268 */ 5269 static inline void _nvme_check_size(void) 5270 { 5271 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); 5272 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 5273 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); 5274 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 5275 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); 5276 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 5277 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); 5278 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); 5279 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 5280 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); 5281 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 5282 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 5283 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 5284 BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) != 5285 NVME_IDENTIFY_DATA_SIZE); 5286 BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); 5287 BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); 5288 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); 5289 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); 5290 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 5291 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 5292 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 5293 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); 5294 BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); 5295 } 5296 5297 5298 static int __init nvme_core_init(void) 5299 { 5300 int result = -ENOMEM; 5301 5302 _nvme_check_size(); 5303 5304 nvme_wq = alloc_workqueue("nvme-wq", 5305 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 5306 if (!nvme_wq) 5307 goto out; 5308 5309 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", 5310 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 5311 if (!nvme_reset_wq) 5312 goto destroy_wq; 5313 5314 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", 5315 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 5316 if (!nvme_delete_wq) 5317 goto destroy_reset_wq; 5318 5319 result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0, 5320 NVME_MINORS, "nvme"); 5321 if (result < 0) 5322 goto destroy_delete_wq; 5323 5324 nvme_class = class_create(THIS_MODULE, "nvme"); 5325 if (IS_ERR(nvme_class)) { 5326 result = PTR_ERR(nvme_class); 5327 goto unregister_chrdev; 5328 } 5329 
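	/*
	 * Controller character devices (/dev/nvme<instance>) are created in this
	 * class; the uevent callback below adds transport information such as
	 * NVME_TRTYPE and NVME_TRADDR for userspace tools and udev rules.
	 */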
nvme_class->dev_uevent = nvme_class_uevent; 5330 5331 nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem"); 5332 if (IS_ERR(nvme_subsys_class)) { 5333 result = PTR_ERR(nvme_subsys_class); 5334 goto destroy_class; 5335 } 5336 5337 result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, 5338 "nvme-generic"); 5339 if (result < 0) 5340 goto destroy_subsys_class; 5341 5342 nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic"); 5343 if (IS_ERR(nvme_ns_chr_class)) { 5344 result = PTR_ERR(nvme_ns_chr_class); 5345 goto unregister_generic_ns; 5346 } 5347 5348 return 0; 5349 5350 unregister_generic_ns: 5351 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 5352 destroy_subsys_class: 5353 class_destroy(nvme_subsys_class); 5354 destroy_class: 5355 class_destroy(nvme_class); 5356 unregister_chrdev: 5357 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 5358 destroy_delete_wq: 5359 destroy_workqueue(nvme_delete_wq); 5360 destroy_reset_wq: 5361 destroy_workqueue(nvme_reset_wq); 5362 destroy_wq: 5363 destroy_workqueue(nvme_wq); 5364 out: 5365 return result; 5366 } 5367 5368 static void __exit nvme_core_exit(void) 5369 { 5370 class_destroy(nvme_ns_chr_class); 5371 class_destroy(nvme_subsys_class); 5372 class_destroy(nvme_class); 5373 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 5374 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 5375 destroy_workqueue(nvme_delete_wq); 5376 destroy_workqueue(nvme_reset_wq); 5377 destroy_workqueue(nvme_wq); 5378 ida_destroy(&nvme_ns_chr_minor_ida); 5379 ida_destroy(&nvme_instance_ida); 5380 } 5381 5382 MODULE_LICENSE("GPL"); 5383 MODULE_VERSION("1.0"); 5384 module_init(nvme_core_init); 5385 module_exit(nvme_core_exit); 5386