1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVM Express device driver 4 * Copyright (c) 2011-2014, Intel Corporation. 5 */ 6 7 #include <linux/blkdev.h> 8 #include <linux/blk-mq.h> 9 #include <linux/compat.h> 10 #include <linux/delay.h> 11 #include <linux/errno.h> 12 #include <linux/hdreg.h> 13 #include <linux/kernel.h> 14 #include <linux/module.h> 15 #include <linux/backing-dev.h> 16 #include <linux/list_sort.h> 17 #include <linux/slab.h> 18 #include <linux/types.h> 19 #include <linux/pr.h> 20 #include <linux/ptrace.h> 21 #include <linux/nvme_ioctl.h> 22 #include <linux/pm_qos.h> 23 #include <asm/unaligned.h> 24 25 #include "nvme.h" 26 #include "fabrics.h" 27 28 #define CREATE_TRACE_POINTS 29 #include "trace.h" 30 31 #define NVME_MINORS (1U << MINORBITS) 32 33 unsigned int admin_timeout = 60; 34 module_param(admin_timeout, uint, 0644); 35 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands"); 36 EXPORT_SYMBOL_GPL(admin_timeout); 37 38 unsigned int nvme_io_timeout = 30; 39 module_param_named(io_timeout, nvme_io_timeout, uint, 0644); 40 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); 41 EXPORT_SYMBOL_GPL(nvme_io_timeout); 42 43 static unsigned char shutdown_timeout = 5; 44 module_param(shutdown_timeout, byte, 0644); 45 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); 46 47 static u8 nvme_max_retries = 5; 48 module_param_named(max_retries, nvme_max_retries, byte, 0644); 49 MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); 50 51 static unsigned long default_ps_max_latency_us = 100000; 52 module_param(default_ps_max_latency_us, ulong, 0644); 53 MODULE_PARM_DESC(default_ps_max_latency_us, 54 "max power saving latency for new devices; use PM QOS to change per device"); 55 56 static bool force_apst; 57 module_param(force_apst, bool, 0644); 58 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); 59 60 static unsigned long apst_primary_timeout_ms = 100; 61 module_param(apst_primary_timeout_ms, ulong, 0644); 62 MODULE_PARM_DESC(apst_primary_timeout_ms, 63 "primary APST timeout in ms"); 64 65 static unsigned long apst_secondary_timeout_ms = 2000; 66 module_param(apst_secondary_timeout_ms, ulong, 0644); 67 MODULE_PARM_DESC(apst_secondary_timeout_ms, 68 "secondary APST timeout in ms"); 69 70 static unsigned long apst_primary_latency_tol_us = 15000; 71 module_param(apst_primary_latency_tol_us, ulong, 0644); 72 MODULE_PARM_DESC(apst_primary_latency_tol_us, 73 "primary APST latency tolerance in us"); 74 75 static unsigned long apst_secondary_latency_tol_us = 100000; 76 module_param(apst_secondary_latency_tol_us, ulong, 0644); 77 MODULE_PARM_DESC(apst_secondary_latency_tol_us, 78 "secondary APST latency tolerance in us"); 79 80 static bool streams; 81 module_param(streams, bool, 0644); 82 MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); 83 84 /* 85 * nvme_wq - hosts nvme related works that are not reset or delete 86 * nvme_reset_wq - hosts nvme reset works 87 * nvme_delete_wq - hosts nvme delete works 88 * 89 * nvme_wq will host works such as scan, aen handling, fw activation, 90 * keep-alive, periodic reconnects etc. nvme_reset_wq 91 * runs reset works which also flush works hosted on nvme_wq for 92 * serialization purposes. nvme_delete_wq host controller deletion 93 * works which flush reset works for serialization. 94 */ 95 struct workqueue_struct *nvme_wq; 96 EXPORT_SYMBOL_GPL(nvme_wq); 97 98 struct workqueue_struct *nvme_reset_wq; 99 EXPORT_SYMBOL_GPL(nvme_reset_wq); 100 101 struct workqueue_struct *nvme_delete_wq; 102 EXPORT_SYMBOL_GPL(nvme_delete_wq); 103 104 static LIST_HEAD(nvme_subsystems); 105 static DEFINE_MUTEX(nvme_subsystems_lock); 106 107 static DEFINE_IDA(nvme_instance_ida); 108 static dev_t nvme_ctrl_base_chr_devt; 109 static struct class *nvme_class; 110 static struct class *nvme_subsys_class; 111 112 static DEFINE_IDA(nvme_ns_chr_minor_ida); 113 static dev_t nvme_ns_chr_devt; 114 static struct class *nvme_ns_chr_class; 115 116 static void nvme_put_subsystem(struct nvme_subsystem *subsys); 117 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 118 unsigned nsid); 119 120 /* 121 * Prepare a queue for teardown. 122 * 123 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set 124 * the capacity to 0 after that to avoid blocking dispatchers that may be 125 * holding bd_butex. This will end buffered writers dirtying pages that can't 126 * be synced. 127 */ 128 static void nvme_set_queue_dying(struct nvme_ns *ns) 129 { 130 if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) 131 return; 132 133 blk_set_queue_dying(ns->queue); 134 blk_mq_unquiesce_queue(ns->queue); 135 136 set_capacity_and_notify(ns->disk, 0); 137 } 138 139 void nvme_queue_scan(struct nvme_ctrl *ctrl) 140 { 141 /* 142 * Only new queue scan work when admin and IO queues are both alive 143 */ 144 if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset) 145 queue_work(nvme_wq, &ctrl->scan_work); 146 } 147 148 /* 149 * Use this function to proceed with scheduling reset_work for a controller 150 * that had previously been set to the resetting state. This is intended for 151 * code paths that can't be interrupted by other reset attempts. A hot removal 152 * may prevent this from succeeding. 153 */ 154 int nvme_try_sched_reset(struct nvme_ctrl *ctrl) 155 { 156 if (ctrl->state != NVME_CTRL_RESETTING) 157 return -EBUSY; 158 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) 159 return -EBUSY; 160 return 0; 161 } 162 EXPORT_SYMBOL_GPL(nvme_try_sched_reset); 163 164 static void nvme_failfast_work(struct work_struct *work) 165 { 166 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 167 struct nvme_ctrl, failfast_work); 168 169 if (ctrl->state != NVME_CTRL_CONNECTING) 170 return; 171 172 set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 173 dev_info(ctrl->device, "failfast expired\n"); 174 nvme_kick_requeue_lists(ctrl); 175 } 176 177 static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl) 178 { 179 if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1) 180 return; 181 182 schedule_delayed_work(&ctrl->failfast_work, 183 ctrl->opts->fast_io_fail_tmo * HZ); 184 } 185 186 static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl) 187 { 188 if (!ctrl->opts) 189 return; 190 191 cancel_delayed_work_sync(&ctrl->failfast_work); 192 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 193 } 194 195 196 int nvme_reset_ctrl(struct nvme_ctrl *ctrl) 197 { 198 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 199 return -EBUSY; 200 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) 201 return -EBUSY; 202 return 0; 203 } 204 EXPORT_SYMBOL_GPL(nvme_reset_ctrl); 205 206 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) 207 { 208 int ret; 209 210 ret = nvme_reset_ctrl(ctrl); 211 if (!ret) { 212 flush_work(&ctrl->reset_work); 213 if (ctrl->state != NVME_CTRL_LIVE) 214 ret = -ENETRESET; 215 } 216 217 return ret; 218 } 219 220 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) 221 { 222 dev_info(ctrl->device, 223 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn); 224 225 flush_work(&ctrl->reset_work); 226 nvme_stop_ctrl(ctrl); 227 nvme_remove_namespaces(ctrl); 228 ctrl->ops->delete_ctrl(ctrl); 229 nvme_uninit_ctrl(ctrl); 230 } 231 232 static void nvme_delete_ctrl_work(struct work_struct *work) 233 { 234 struct nvme_ctrl *ctrl = 235 container_of(work, struct nvme_ctrl, delete_work); 236 237 nvme_do_delete_ctrl(ctrl); 238 } 239 240 int nvme_delete_ctrl(struct nvme_ctrl *ctrl) 241 { 242 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 243 return -EBUSY; 244 if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) 245 return -EBUSY; 246 return 0; 247 } 248 EXPORT_SYMBOL_GPL(nvme_delete_ctrl); 249 250 static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) 251 { 252 /* 253 * Keep a reference until nvme_do_delete_ctrl() complete, 254 * since ->delete_ctrl can free the controller. 255 */ 256 nvme_get_ctrl(ctrl); 257 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 258 nvme_do_delete_ctrl(ctrl); 259 nvme_put_ctrl(ctrl); 260 } 261 262 static blk_status_t nvme_error_status(u16 status) 263 { 264 switch (status & 0x7ff) { 265 case NVME_SC_SUCCESS: 266 return BLK_STS_OK; 267 case NVME_SC_CAP_EXCEEDED: 268 return BLK_STS_NOSPC; 269 case NVME_SC_LBA_RANGE: 270 case NVME_SC_CMD_INTERRUPTED: 271 case NVME_SC_NS_NOT_READY: 272 return BLK_STS_TARGET; 273 case NVME_SC_BAD_ATTRIBUTES: 274 case NVME_SC_ONCS_NOT_SUPPORTED: 275 case NVME_SC_INVALID_OPCODE: 276 case NVME_SC_INVALID_FIELD: 277 case NVME_SC_INVALID_NS: 278 return BLK_STS_NOTSUPP; 279 case NVME_SC_WRITE_FAULT: 280 case NVME_SC_READ_ERROR: 281 case NVME_SC_UNWRITTEN_BLOCK: 282 case NVME_SC_ACCESS_DENIED: 283 case NVME_SC_READ_ONLY: 284 case NVME_SC_COMPARE_FAILED: 285 return BLK_STS_MEDIUM; 286 case NVME_SC_GUARD_CHECK: 287 case NVME_SC_APPTAG_CHECK: 288 case NVME_SC_REFTAG_CHECK: 289 case NVME_SC_INVALID_PI: 290 return BLK_STS_PROTECTION; 291 case NVME_SC_RESERVATION_CONFLICT: 292 return BLK_STS_NEXUS; 293 case NVME_SC_HOST_PATH_ERROR: 294 return BLK_STS_TRANSPORT; 295 case NVME_SC_ZONE_TOO_MANY_ACTIVE: 296 return BLK_STS_ZONE_ACTIVE_RESOURCE; 297 case NVME_SC_ZONE_TOO_MANY_OPEN: 298 return BLK_STS_ZONE_OPEN_RESOURCE; 299 default: 300 return BLK_STS_IOERR; 301 } 302 } 303 304 static void nvme_retry_req(struct request *req) 305 { 306 unsigned long delay = 0; 307 u16 crd; 308 309 /* The mask and shift result must be <= 3 */ 310 crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; 311 if (crd) 312 delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; 313 314 nvme_req(req)->retries++; 315 blk_mq_requeue_request(req, false); 316 blk_mq_delay_kick_requeue_list(req->q, delay); 317 } 318 319 enum nvme_disposition { 320 COMPLETE, 321 RETRY, 322 FAILOVER, 323 }; 324 325 static inline enum nvme_disposition nvme_decide_disposition(struct request *req) 326 { 327 if (likely(nvme_req(req)->status == 0)) 328 return COMPLETE; 329 330 if (blk_noretry_request(req) || 331 (nvme_req(req)->status & NVME_SC_DNR) || 332 nvme_req(req)->retries >= nvme_max_retries) 333 return COMPLETE; 334 335 if (req->cmd_flags & REQ_NVME_MPATH) { 336 if (nvme_is_path_error(nvme_req(req)->status) || 337 blk_queue_dying(req->q)) 338 return FAILOVER; 339 } else { 340 if (blk_queue_dying(req->q)) 341 return COMPLETE; 342 } 343 344 return RETRY; 345 } 346 347 static inline void nvme_end_req(struct request *req) 348 { 349 blk_status_t status = nvme_error_status(nvme_req(req)->status); 350 351 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 352 req_op(req) == REQ_OP_ZONE_APPEND) 353 req->__sector = nvme_lba_to_sect(req->q->queuedata, 354 le64_to_cpu(nvme_req(req)->result.u64)); 355 356 nvme_trace_bio_complete(req); 357 blk_mq_end_request(req, status); 358 } 359 360 void nvme_complete_rq(struct request *req) 361 { 362 trace_nvme_complete_rq(req); 363 nvme_cleanup_cmd(req); 364 365 if (nvme_req(req)->ctrl->kas) 366 nvme_req(req)->ctrl->comp_seen = true; 367 368 switch (nvme_decide_disposition(req)) { 369 case COMPLETE: 370 nvme_end_req(req); 371 return; 372 case RETRY: 373 nvme_retry_req(req); 374 return; 375 case FAILOVER: 376 nvme_failover_req(req); 377 return; 378 } 379 } 380 EXPORT_SYMBOL_GPL(nvme_complete_rq); 381 382 /* 383 * Called to unwind from ->queue_rq on a failed command submission so that the 384 * multipathing code gets called to potentially failover to another path. 385 * The caller needs to unwind all transport specific resource allocations and 386 * must return propagate the return value. 387 */ 388 blk_status_t nvme_host_path_error(struct request *req) 389 { 390 nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; 391 blk_mq_set_request_complete(req); 392 nvme_complete_rq(req); 393 return BLK_STS_OK; 394 } 395 EXPORT_SYMBOL_GPL(nvme_host_path_error); 396 397 bool nvme_cancel_request(struct request *req, void *data, bool reserved) 398 { 399 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, 400 "Cancelling I/O %d", req->tag); 401 402 /* don't abort one completed request */ 403 if (blk_mq_request_completed(req)) 404 return true; 405 406 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; 407 nvme_req(req)->flags |= NVME_REQ_CANCELLED; 408 blk_mq_complete_request(req); 409 return true; 410 } 411 EXPORT_SYMBOL_GPL(nvme_cancel_request); 412 413 void nvme_cancel_tagset(struct nvme_ctrl *ctrl) 414 { 415 if (ctrl->tagset) { 416 blk_mq_tagset_busy_iter(ctrl->tagset, 417 nvme_cancel_request, ctrl); 418 blk_mq_tagset_wait_completed_request(ctrl->tagset); 419 } 420 } 421 EXPORT_SYMBOL_GPL(nvme_cancel_tagset); 422 423 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) 424 { 425 if (ctrl->admin_tagset) { 426 blk_mq_tagset_busy_iter(ctrl->admin_tagset, 427 nvme_cancel_request, ctrl); 428 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); 429 } 430 } 431 EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); 432 433 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 434 enum nvme_ctrl_state new_state) 435 { 436 enum nvme_ctrl_state old_state; 437 unsigned long flags; 438 bool changed = false; 439 440 spin_lock_irqsave(&ctrl->lock, flags); 441 442 old_state = ctrl->state; 443 switch (new_state) { 444 case NVME_CTRL_LIVE: 445 switch (old_state) { 446 case NVME_CTRL_NEW: 447 case NVME_CTRL_RESETTING: 448 case NVME_CTRL_CONNECTING: 449 changed = true; 450 fallthrough; 451 default: 452 break; 453 } 454 break; 455 case NVME_CTRL_RESETTING: 456 switch (old_state) { 457 case NVME_CTRL_NEW: 458 case NVME_CTRL_LIVE: 459 changed = true; 460 fallthrough; 461 default: 462 break; 463 } 464 break; 465 case NVME_CTRL_CONNECTING: 466 switch (old_state) { 467 case NVME_CTRL_NEW: 468 case NVME_CTRL_RESETTING: 469 changed = true; 470 fallthrough; 471 default: 472 break; 473 } 474 break; 475 case NVME_CTRL_DELETING: 476 switch (old_state) { 477 case NVME_CTRL_LIVE: 478 case NVME_CTRL_RESETTING: 479 case NVME_CTRL_CONNECTING: 480 changed = true; 481 fallthrough; 482 default: 483 break; 484 } 485 break; 486 case NVME_CTRL_DELETING_NOIO: 487 switch (old_state) { 488 case NVME_CTRL_DELETING: 489 case NVME_CTRL_DEAD: 490 changed = true; 491 fallthrough; 492 default: 493 break; 494 } 495 break; 496 case NVME_CTRL_DEAD: 497 switch (old_state) { 498 case NVME_CTRL_DELETING: 499 changed = true; 500 fallthrough; 501 default: 502 break; 503 } 504 break; 505 default: 506 break; 507 } 508 509 if (changed) { 510 ctrl->state = new_state; 511 wake_up_all(&ctrl->state_wq); 512 } 513 514 spin_unlock_irqrestore(&ctrl->lock, flags); 515 if (!changed) 516 return false; 517 518 if (ctrl->state == NVME_CTRL_LIVE) { 519 if (old_state == NVME_CTRL_CONNECTING) 520 nvme_stop_failfast_work(ctrl); 521 nvme_kick_requeue_lists(ctrl); 522 } else if (ctrl->state == NVME_CTRL_CONNECTING && 523 old_state == NVME_CTRL_RESETTING) { 524 nvme_start_failfast_work(ctrl); 525 } 526 return changed; 527 } 528 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); 529 530 /* 531 * Returns true for sink states that can't ever transition back to live. 532 */ 533 static bool nvme_state_terminal(struct nvme_ctrl *ctrl) 534 { 535 switch (ctrl->state) { 536 case NVME_CTRL_NEW: 537 case NVME_CTRL_LIVE: 538 case NVME_CTRL_RESETTING: 539 case NVME_CTRL_CONNECTING: 540 return false; 541 case NVME_CTRL_DELETING: 542 case NVME_CTRL_DELETING_NOIO: 543 case NVME_CTRL_DEAD: 544 return true; 545 default: 546 WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state); 547 return true; 548 } 549 } 550 551 /* 552 * Waits for the controller state to be resetting, or returns false if it is 553 * not possible to ever transition to that state. 554 */ 555 bool nvme_wait_reset(struct nvme_ctrl *ctrl) 556 { 557 wait_event(ctrl->state_wq, 558 nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || 559 nvme_state_terminal(ctrl)); 560 return ctrl->state == NVME_CTRL_RESETTING; 561 } 562 EXPORT_SYMBOL_GPL(nvme_wait_reset); 563 564 static void nvme_free_ns_head(struct kref *ref) 565 { 566 struct nvme_ns_head *head = 567 container_of(ref, struct nvme_ns_head, ref); 568 569 nvme_mpath_remove_disk(head); 570 ida_simple_remove(&head->subsys->ns_ida, head->instance); 571 cleanup_srcu_struct(&head->srcu); 572 nvme_put_subsystem(head->subsys); 573 kfree(head); 574 } 575 576 bool nvme_tryget_ns_head(struct nvme_ns_head *head) 577 { 578 return kref_get_unless_zero(&head->ref); 579 } 580 581 void nvme_put_ns_head(struct nvme_ns_head *head) 582 { 583 kref_put(&head->ref, nvme_free_ns_head); 584 } 585 586 static void nvme_free_ns(struct kref *kref) 587 { 588 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); 589 590 if (ns->ndev) 591 nvme_nvm_unregister(ns); 592 593 put_disk(ns->disk); 594 nvme_put_ns_head(ns->head); 595 nvme_put_ctrl(ns->ctrl); 596 kfree(ns); 597 } 598 599 static inline bool nvme_get_ns(struct nvme_ns *ns) 600 { 601 return kref_get_unless_zero(&ns->kref); 602 } 603 604 void nvme_put_ns(struct nvme_ns *ns) 605 { 606 kref_put(&ns->kref, nvme_free_ns); 607 } 608 EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU); 609 610 static inline void nvme_clear_nvme_request(struct request *req) 611 { 612 nvme_req(req)->retries = 0; 613 nvme_req(req)->flags = 0; 614 req->rq_flags |= RQF_DONTPREP; 615 } 616 617 static inline unsigned int nvme_req_op(struct nvme_command *cmd) 618 { 619 return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; 620 } 621 622 static inline void nvme_init_request(struct request *req, 623 struct nvme_command *cmd) 624 { 625 if (req->q->queuedata) 626 req->timeout = NVME_IO_TIMEOUT; 627 else /* no queuedata implies admin queue */ 628 req->timeout = NVME_ADMIN_TIMEOUT; 629 630 /* passthru commands should let the driver set the SGL flags */ 631 cmd->common.flags &= ~NVME_CMD_SGL_ALL; 632 633 req->cmd_flags |= REQ_FAILFAST_DRIVER; 634 nvme_clear_nvme_request(req); 635 memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); 636 } 637 638 struct request *nvme_alloc_request(struct request_queue *q, 639 struct nvme_command *cmd, blk_mq_req_flags_t flags) 640 { 641 struct request *req; 642 643 req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags); 644 if (!IS_ERR(req)) 645 nvme_init_request(req, cmd); 646 return req; 647 } 648 EXPORT_SYMBOL_GPL(nvme_alloc_request); 649 650 static struct request *nvme_alloc_request_qid(struct request_queue *q, 651 struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid) 652 { 653 struct request *req; 654 655 req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags, 656 qid ? qid - 1 : 0); 657 if (!IS_ERR(req)) 658 nvme_init_request(req, cmd); 659 return req; 660 } 661 662 /* 663 * For something we're not in a state to send to the device the default action 664 * is to busy it and retry it after the controller state is recovered. However, 665 * if the controller is deleting or if anything is marked for failfast or 666 * nvme multipath it is immediately failed. 667 * 668 * Note: commands used to initialize the controller will be marked for failfast. 669 * Note: nvme cli/ioctl commands are marked for failfast. 670 */ 671 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, 672 struct request *rq) 673 { 674 if (ctrl->state != NVME_CTRL_DELETING_NOIO && 675 ctrl->state != NVME_CTRL_DEAD && 676 !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && 677 !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) 678 return BLK_STS_RESOURCE; 679 return nvme_host_path_error(rq); 680 } 681 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); 682 683 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, 684 bool queue_live) 685 { 686 struct nvme_request *req = nvme_req(rq); 687 688 /* 689 * currently we have a problem sending passthru commands 690 * on the admin_q if the controller is not LIVE because we can't 691 * make sure that they are going out after the admin connect, 692 * controller enable and/or other commands in the initialization 693 * sequence. until the controller will be LIVE, fail with 694 * BLK_STS_RESOURCE so that they will be rescheduled. 695 */ 696 if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) 697 return false; 698 699 if (ctrl->ops->flags & NVME_F_FABRICS) { 700 /* 701 * Only allow commands on a live queue, except for the connect 702 * command, which is require to set the queue live in the 703 * appropinquate states. 704 */ 705 switch (ctrl->state) { 706 case NVME_CTRL_CONNECTING: 707 if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && 708 req->cmd->fabrics.fctype == nvme_fabrics_type_connect) 709 return true; 710 break; 711 default: 712 break; 713 case NVME_CTRL_DEAD: 714 return false; 715 } 716 } 717 718 return queue_live; 719 } 720 EXPORT_SYMBOL_GPL(__nvme_check_ready); 721 722 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) 723 { 724 struct nvme_command c = { }; 725 726 c.directive.opcode = nvme_admin_directive_send; 727 c.directive.nsid = cpu_to_le32(NVME_NSID_ALL); 728 c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE; 729 c.directive.dtype = NVME_DIR_IDENTIFY; 730 c.directive.tdtype = NVME_DIR_STREAMS; 731 c.directive.endir = enable ? NVME_DIR_ENDIR : 0; 732 733 return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0); 734 } 735 736 static int nvme_disable_streams(struct nvme_ctrl *ctrl) 737 { 738 return nvme_toggle_streams(ctrl, false); 739 } 740 741 static int nvme_enable_streams(struct nvme_ctrl *ctrl) 742 { 743 return nvme_toggle_streams(ctrl, true); 744 } 745 746 static int nvme_get_stream_params(struct nvme_ctrl *ctrl, 747 struct streams_directive_params *s, u32 nsid) 748 { 749 struct nvme_command c = { }; 750 751 memset(s, 0, sizeof(*s)); 752 753 c.directive.opcode = nvme_admin_directive_recv; 754 c.directive.nsid = cpu_to_le32(nsid); 755 c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s))); 756 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; 757 c.directive.dtype = NVME_DIR_STREAMS; 758 759 return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s)); 760 } 761 762 static int nvme_configure_directives(struct nvme_ctrl *ctrl) 763 { 764 struct streams_directive_params s; 765 int ret; 766 767 if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES)) 768 return 0; 769 if (!streams) 770 return 0; 771 772 ret = nvme_enable_streams(ctrl); 773 if (ret) 774 return ret; 775 776 ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL); 777 if (ret) 778 goto out_disable_stream; 779 780 ctrl->nssa = le16_to_cpu(s.nssa); 781 if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) { 782 dev_info(ctrl->device, "too few streams (%u) available\n", 783 ctrl->nssa); 784 goto out_disable_stream; 785 } 786 787 ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1); 788 dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams); 789 return 0; 790 791 out_disable_stream: 792 nvme_disable_streams(ctrl); 793 return ret; 794 } 795 796 /* 797 * Check if 'req' has a write hint associated with it. If it does, assign 798 * a valid namespace stream to the write. 799 */ 800 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl, 801 struct request *req, u16 *control, 802 u32 *dsmgmt) 803 { 804 enum rw_hint streamid = req->write_hint; 805 806 if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE) 807 streamid = 0; 808 else { 809 streamid--; 810 if (WARN_ON_ONCE(streamid > ctrl->nr_streams)) 811 return; 812 813 *control |= NVME_RW_DTYPE_STREAMS; 814 *dsmgmt |= streamid << 16; 815 } 816 817 if (streamid < ARRAY_SIZE(req->q->write_hints)) 818 req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9; 819 } 820 821 static inline void nvme_setup_flush(struct nvme_ns *ns, 822 struct nvme_command *cmnd) 823 { 824 cmnd->common.opcode = nvme_cmd_flush; 825 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); 826 } 827 828 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, 829 struct nvme_command *cmnd) 830 { 831 unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; 832 struct nvme_dsm_range *range; 833 struct bio *bio; 834 835 /* 836 * Some devices do not consider the DSM 'Number of Ranges' field when 837 * determining how much data to DMA. Always allocate memory for maximum 838 * number of segments to prevent device reading beyond end of buffer. 839 */ 840 static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; 841 842 range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN); 843 if (!range) { 844 /* 845 * If we fail allocation our range, fallback to the controller 846 * discard page. If that's also busy, it's safe to return 847 * busy, as we know we can make progress once that's freed. 848 */ 849 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) 850 return BLK_STS_RESOURCE; 851 852 range = page_address(ns->ctrl->discard_page); 853 } 854 855 __rq_for_each_bio(bio, req) { 856 u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); 857 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; 858 859 if (n < segments) { 860 range[n].cattr = cpu_to_le32(0); 861 range[n].nlb = cpu_to_le32(nlb); 862 range[n].slba = cpu_to_le64(slba); 863 } 864 n++; 865 } 866 867 if (WARN_ON_ONCE(n != segments)) { 868 if (virt_to_page(range) == ns->ctrl->discard_page) 869 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); 870 else 871 kfree(range); 872 return BLK_STS_IOERR; 873 } 874 875 cmnd->dsm.opcode = nvme_cmd_dsm; 876 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); 877 cmnd->dsm.nr = cpu_to_le32(segments - 1); 878 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 879 880 req->special_vec.bv_page = virt_to_page(range); 881 req->special_vec.bv_offset = offset_in_page(range); 882 req->special_vec.bv_len = alloc_size; 883 req->rq_flags |= RQF_SPECIAL_PAYLOAD; 884 885 return BLK_STS_OK; 886 } 887 888 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, 889 struct request *req, struct nvme_command *cmnd) 890 { 891 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 892 return nvme_setup_discard(ns, req, cmnd); 893 894 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; 895 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); 896 cmnd->write_zeroes.slba = 897 cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); 898 cmnd->write_zeroes.length = 899 cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); 900 cmnd->write_zeroes.control = 0; 901 return BLK_STS_OK; 902 } 903 904 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, 905 struct request *req, struct nvme_command *cmnd, 906 enum nvme_opcode op) 907 { 908 struct nvme_ctrl *ctrl = ns->ctrl; 909 u16 control = 0; 910 u32 dsmgmt = 0; 911 912 if (req->cmd_flags & REQ_FUA) 913 control |= NVME_RW_FUA; 914 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) 915 control |= NVME_RW_LR; 916 917 if (req->cmd_flags & REQ_RAHEAD) 918 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 919 920 cmnd->rw.opcode = op; 921 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); 922 cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); 923 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); 924 925 if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams) 926 nvme_assign_write_stream(ctrl, req, &control, &dsmgmt); 927 928 if (ns->ms) { 929 /* 930 * If formated with metadata, the block layer always provides a 931 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else 932 * we enable the PRACT bit for protection information or set the 933 * namespace capacity to zero to prevent any I/O. 934 */ 935 if (!blk_integrity_rq(req)) { 936 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) 937 return BLK_STS_NOTSUPP; 938 control |= NVME_RW_PRINFO_PRACT; 939 } 940 941 switch (ns->pi_type) { 942 case NVME_NS_DPS_PI_TYPE3: 943 control |= NVME_RW_PRINFO_PRCHK_GUARD; 944 break; 945 case NVME_NS_DPS_PI_TYPE1: 946 case NVME_NS_DPS_PI_TYPE2: 947 control |= NVME_RW_PRINFO_PRCHK_GUARD | 948 NVME_RW_PRINFO_PRCHK_REF; 949 if (op == nvme_cmd_zone_append) 950 control |= NVME_RW_APPEND_PIREMAP; 951 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); 952 break; 953 } 954 } 955 956 cmnd->rw.control = cpu_to_le16(control); 957 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 958 return 0; 959 } 960 961 void nvme_cleanup_cmd(struct request *req) 962 { 963 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { 964 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 965 struct page *page = req->special_vec.bv_page; 966 967 if (page == ctrl->discard_page) 968 clear_bit_unlock(0, &ctrl->discard_page_busy); 969 else 970 kfree(page_address(page) + req->special_vec.bv_offset); 971 } 972 } 973 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); 974 975 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) 976 { 977 struct nvme_command *cmd = nvme_req(req)->cmd; 978 blk_status_t ret = BLK_STS_OK; 979 980 if (!(req->rq_flags & RQF_DONTPREP)) { 981 nvme_clear_nvme_request(req); 982 memset(cmd, 0, sizeof(*cmd)); 983 } 984 985 switch (req_op(req)) { 986 case REQ_OP_DRV_IN: 987 case REQ_OP_DRV_OUT: 988 /* these are setup prior to execution in nvme_init_request() */ 989 break; 990 case REQ_OP_FLUSH: 991 nvme_setup_flush(ns, cmd); 992 break; 993 case REQ_OP_ZONE_RESET_ALL: 994 case REQ_OP_ZONE_RESET: 995 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET); 996 break; 997 case REQ_OP_ZONE_OPEN: 998 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN); 999 break; 1000 case REQ_OP_ZONE_CLOSE: 1001 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE); 1002 break; 1003 case REQ_OP_ZONE_FINISH: 1004 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH); 1005 break; 1006 case REQ_OP_WRITE_ZEROES: 1007 ret = nvme_setup_write_zeroes(ns, req, cmd); 1008 break; 1009 case REQ_OP_DISCARD: 1010 ret = nvme_setup_discard(ns, req, cmd); 1011 break; 1012 case REQ_OP_READ: 1013 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read); 1014 break; 1015 case REQ_OP_WRITE: 1016 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write); 1017 break; 1018 case REQ_OP_ZONE_APPEND: 1019 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append); 1020 break; 1021 default: 1022 WARN_ON_ONCE(1); 1023 return BLK_STS_IOERR; 1024 } 1025 1026 cmd->common.command_id = req->tag; 1027 trace_nvme_setup_cmd(req, cmd); 1028 return ret; 1029 } 1030 EXPORT_SYMBOL_GPL(nvme_setup_cmd); 1031 1032 static void nvme_end_sync_rq(struct request *rq, blk_status_t error) 1033 { 1034 struct completion *waiting = rq->end_io_data; 1035 1036 rq->end_io_data = NULL; 1037 complete(waiting); 1038 } 1039 1040 static void nvme_execute_rq_polled(struct request_queue *q, 1041 struct gendisk *bd_disk, struct request *rq, int at_head) 1042 { 1043 DECLARE_COMPLETION_ONSTACK(wait); 1044 1045 WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)); 1046 1047 rq->cmd_flags |= REQ_HIPRI; 1048 rq->end_io_data = &wait; 1049 blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq); 1050 1051 while (!completion_done(&wait)) { 1052 blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true); 1053 cond_resched(); 1054 } 1055 } 1056 1057 /* 1058 * Returns 0 on success. If the result is negative, it's a Linux error code; 1059 * if the result is positive, it's an NVM Express status code 1060 */ 1061 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 1062 union nvme_result *result, void *buffer, unsigned bufflen, 1063 unsigned timeout, int qid, int at_head, 1064 blk_mq_req_flags_t flags, bool poll) 1065 { 1066 struct request *req; 1067 int ret; 1068 1069 if (qid == NVME_QID_ANY) 1070 req = nvme_alloc_request(q, cmd, flags); 1071 else 1072 req = nvme_alloc_request_qid(q, cmd, flags, qid); 1073 if (IS_ERR(req)) 1074 return PTR_ERR(req); 1075 1076 if (timeout) 1077 req->timeout = timeout; 1078 1079 if (buffer && bufflen) { 1080 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); 1081 if (ret) 1082 goto out; 1083 } 1084 1085 if (poll) 1086 nvme_execute_rq_polled(req->q, NULL, req, at_head); 1087 else 1088 blk_execute_rq(NULL, req, at_head); 1089 if (result) 1090 *result = nvme_req(req)->result; 1091 if (nvme_req(req)->flags & NVME_REQ_CANCELLED) 1092 ret = -EINTR; 1093 else 1094 ret = nvme_req(req)->status; 1095 out: 1096 blk_mq_free_request(req); 1097 return ret; 1098 } 1099 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd); 1100 1101 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 1102 void *buffer, unsigned bufflen) 1103 { 1104 return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0, 1105 NVME_QID_ANY, 0, 0, false); 1106 } 1107 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); 1108 1109 static u32 nvme_known_admin_effects(u8 opcode) 1110 { 1111 switch (opcode) { 1112 case nvme_admin_format_nvm: 1113 return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC | 1114 NVME_CMD_EFFECTS_CSE_MASK; 1115 case nvme_admin_sanitize_nvm: 1116 return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK; 1117 default: 1118 break; 1119 } 1120 return 0; 1121 } 1122 1123 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) 1124 { 1125 u32 effects = 0; 1126 1127 if (ns) { 1128 if (ns->head->effects) 1129 effects = le32_to_cpu(ns->head->effects->iocs[opcode]); 1130 if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) 1131 dev_warn_once(ctrl->device, 1132 "IO command:%02x has unhandled effects:%08x\n", 1133 opcode, effects); 1134 return 0; 1135 } 1136 1137 if (ctrl->effects) 1138 effects = le32_to_cpu(ctrl->effects->acs[opcode]); 1139 effects |= nvme_known_admin_effects(opcode); 1140 1141 return effects; 1142 } 1143 EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU); 1144 1145 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 1146 u8 opcode) 1147 { 1148 u32 effects = nvme_command_effects(ctrl, ns, opcode); 1149 1150 /* 1151 * For simplicity, IO to all namespaces is quiesced even if the command 1152 * effects say only one namespace is affected. 1153 */ 1154 if (effects & NVME_CMD_EFFECTS_CSE_MASK) { 1155 mutex_lock(&ctrl->scan_lock); 1156 mutex_lock(&ctrl->subsys->lock); 1157 nvme_mpath_start_freeze(ctrl->subsys); 1158 nvme_mpath_wait_freeze(ctrl->subsys); 1159 nvme_start_freeze(ctrl); 1160 nvme_wait_freeze(ctrl); 1161 } 1162 return effects; 1163 } 1164 1165 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) 1166 { 1167 if (effects & NVME_CMD_EFFECTS_CSE_MASK) { 1168 nvme_unfreeze(ctrl); 1169 nvme_mpath_unfreeze(ctrl->subsys); 1170 mutex_unlock(&ctrl->subsys->lock); 1171 nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); 1172 mutex_unlock(&ctrl->scan_lock); 1173 } 1174 if (effects & NVME_CMD_EFFECTS_CCC) 1175 nvme_init_ctrl_finish(ctrl); 1176 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) { 1177 nvme_queue_scan(ctrl); 1178 flush_work(&ctrl->scan_work); 1179 } 1180 } 1181 1182 void nvme_execute_passthru_rq(struct request *rq) 1183 { 1184 struct nvme_command *cmd = nvme_req(rq)->cmd; 1185 struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl; 1186 struct nvme_ns *ns = rq->q->queuedata; 1187 struct gendisk *disk = ns ? ns->disk : NULL; 1188 u32 effects; 1189 1190 effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode); 1191 blk_execute_rq(disk, rq, 0); 1192 if (effects) /* nothing to be done for zero cmd effects */ 1193 nvme_passthru_end(ctrl, effects); 1194 } 1195 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU); 1196 1197 /* 1198 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: 1199 * 1200 * The host should send Keep Alive commands at half of the Keep Alive Timeout 1201 * accounting for transport roundtrip times [..]. 1202 */ 1203 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) 1204 { 1205 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2); 1206 } 1207 1208 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) 1209 { 1210 struct nvme_ctrl *ctrl = rq->end_io_data; 1211 unsigned long flags; 1212 bool startka = false; 1213 1214 blk_mq_free_request(rq); 1215 1216 if (status) { 1217 dev_err(ctrl->device, 1218 "failed nvme_keep_alive_end_io error=%d\n", 1219 status); 1220 return; 1221 } 1222 1223 ctrl->comp_seen = false; 1224 spin_lock_irqsave(&ctrl->lock, flags); 1225 if (ctrl->state == NVME_CTRL_LIVE || 1226 ctrl->state == NVME_CTRL_CONNECTING) 1227 startka = true; 1228 spin_unlock_irqrestore(&ctrl->lock, flags); 1229 if (startka) 1230 nvme_queue_keep_alive_work(ctrl); 1231 } 1232 1233 static void nvme_keep_alive_work(struct work_struct *work) 1234 { 1235 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 1236 struct nvme_ctrl, ka_work); 1237 bool comp_seen = ctrl->comp_seen; 1238 struct request *rq; 1239 1240 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { 1241 dev_dbg(ctrl->device, 1242 "reschedule traffic based keep-alive timer\n"); 1243 ctrl->comp_seen = false; 1244 nvme_queue_keep_alive_work(ctrl); 1245 return; 1246 } 1247 1248 rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, 1249 BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); 1250 if (IS_ERR(rq)) { 1251 /* allocation failure, reset the controller */ 1252 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); 1253 nvme_reset_ctrl(ctrl); 1254 return; 1255 } 1256 1257 rq->timeout = ctrl->kato * HZ; 1258 rq->end_io_data = ctrl; 1259 blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io); 1260 } 1261 1262 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) 1263 { 1264 if (unlikely(ctrl->kato == 0)) 1265 return; 1266 1267 nvme_queue_keep_alive_work(ctrl); 1268 } 1269 1270 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) 1271 { 1272 if (unlikely(ctrl->kato == 0)) 1273 return; 1274 1275 cancel_delayed_work_sync(&ctrl->ka_work); 1276 } 1277 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); 1278 1279 /* 1280 * In NVMe 1.0 the CNS field was just a binary controller or namespace 1281 * flag, thus sending any new CNS opcodes has a big chance of not working. 1282 * Qemu unfortunately had that bug after reporting a 1.1 version compliance 1283 * (but not for any later version). 1284 */ 1285 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) 1286 { 1287 if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) 1288 return ctrl->vs < NVME_VS(1, 2, 0); 1289 return ctrl->vs < NVME_VS(1, 1, 0); 1290 } 1291 1292 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) 1293 { 1294 struct nvme_command c = { }; 1295 int error; 1296 1297 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1298 c.identify.opcode = nvme_admin_identify; 1299 c.identify.cns = NVME_ID_CNS_CTRL; 1300 1301 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); 1302 if (!*id) 1303 return -ENOMEM; 1304 1305 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, 1306 sizeof(struct nvme_id_ctrl)); 1307 if (error) 1308 kfree(*id); 1309 return error; 1310 } 1311 1312 static bool nvme_multi_css(struct nvme_ctrl *ctrl) 1313 { 1314 return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI; 1315 } 1316 1317 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, 1318 struct nvme_ns_id_desc *cur, bool *csi_seen) 1319 { 1320 const char *warn_str = "ctrl returned bogus length:"; 1321 void *data = cur; 1322 1323 switch (cur->nidt) { 1324 case NVME_NIDT_EUI64: 1325 if (cur->nidl != NVME_NIDT_EUI64_LEN) { 1326 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n", 1327 warn_str, cur->nidl); 1328 return -1; 1329 } 1330 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); 1331 return NVME_NIDT_EUI64_LEN; 1332 case NVME_NIDT_NGUID: 1333 if (cur->nidl != NVME_NIDT_NGUID_LEN) { 1334 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n", 1335 warn_str, cur->nidl); 1336 return -1; 1337 } 1338 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); 1339 return NVME_NIDT_NGUID_LEN; 1340 case NVME_NIDT_UUID: 1341 if (cur->nidl != NVME_NIDT_UUID_LEN) { 1342 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n", 1343 warn_str, cur->nidl); 1344 return -1; 1345 } 1346 uuid_copy(&ids->uuid, data + sizeof(*cur)); 1347 return NVME_NIDT_UUID_LEN; 1348 case NVME_NIDT_CSI: 1349 if (cur->nidl != NVME_NIDT_CSI_LEN) { 1350 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n", 1351 warn_str, cur->nidl); 1352 return -1; 1353 } 1354 memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN); 1355 *csi_seen = true; 1356 return NVME_NIDT_CSI_LEN; 1357 default: 1358 /* Skip unknown types */ 1359 return cur->nidl; 1360 } 1361 } 1362 1363 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, 1364 struct nvme_ns_ids *ids) 1365 { 1366 struct nvme_command c = { }; 1367 bool csi_seen = false; 1368 int status, pos, len; 1369 void *data; 1370 1371 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl)) 1372 return 0; 1373 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) 1374 return 0; 1375 1376 c.identify.opcode = nvme_admin_identify; 1377 c.identify.nsid = cpu_to_le32(nsid); 1378 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; 1379 1380 data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 1381 if (!data) 1382 return -ENOMEM; 1383 1384 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, 1385 NVME_IDENTIFY_DATA_SIZE); 1386 if (status) { 1387 dev_warn(ctrl->device, 1388 "Identify Descriptors failed (nsid=%u, status=0x%x)\n", 1389 nsid, status); 1390 goto free_data; 1391 } 1392 1393 for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { 1394 struct nvme_ns_id_desc *cur = data + pos; 1395 1396 if (cur->nidl == 0) 1397 break; 1398 1399 len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen); 1400 if (len < 0) 1401 break; 1402 1403 len += sizeof(*cur); 1404 } 1405 1406 if (nvme_multi_css(ctrl) && !csi_seen) { 1407 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n", 1408 nsid); 1409 status = -EINVAL; 1410 } 1411 1412 free_data: 1413 kfree(data); 1414 return status; 1415 } 1416 1417 static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, 1418 struct nvme_ns_ids *ids, struct nvme_id_ns **id) 1419 { 1420 struct nvme_command c = { }; 1421 int error; 1422 1423 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1424 c.identify.opcode = nvme_admin_identify; 1425 c.identify.nsid = cpu_to_le32(nsid); 1426 c.identify.cns = NVME_ID_CNS_NS; 1427 1428 *id = kmalloc(sizeof(**id), GFP_KERNEL); 1429 if (!*id) 1430 return -ENOMEM; 1431 1432 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); 1433 if (error) { 1434 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); 1435 goto out_free_id; 1436 } 1437 1438 error = NVME_SC_INVALID_NS | NVME_SC_DNR; 1439 if ((*id)->ncap == 0) /* namespace not allocated or attached */ 1440 goto out_free_id; 1441 1442 if (ctrl->vs >= NVME_VS(1, 1, 0) && 1443 !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 1444 memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64)); 1445 if (ctrl->vs >= NVME_VS(1, 2, 0) && 1446 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 1447 memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid)); 1448 1449 return 0; 1450 1451 out_free_id: 1452 kfree(*id); 1453 return error; 1454 } 1455 1456 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, 1457 unsigned int dword11, void *buffer, size_t buflen, u32 *result) 1458 { 1459 union nvme_result res = { 0 }; 1460 struct nvme_command c = { }; 1461 int ret; 1462 1463 c.features.opcode = op; 1464 c.features.fid = cpu_to_le32(fid); 1465 c.features.dword11 = cpu_to_le32(dword11); 1466 1467 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, 1468 buffer, buflen, 0, NVME_QID_ANY, 0, 0, false); 1469 if (ret >= 0 && result) 1470 *result = le32_to_cpu(res.u32); 1471 return ret; 1472 } 1473 1474 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, 1475 unsigned int dword11, void *buffer, size_t buflen, 1476 u32 *result) 1477 { 1478 return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, 1479 buflen, result); 1480 } 1481 EXPORT_SYMBOL_GPL(nvme_set_features); 1482 1483 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, 1484 unsigned int dword11, void *buffer, size_t buflen, 1485 u32 *result) 1486 { 1487 return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, 1488 buflen, result); 1489 } 1490 EXPORT_SYMBOL_GPL(nvme_get_features); 1491 1492 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) 1493 { 1494 u32 q_count = (*count - 1) | ((*count - 1) << 16); 1495 u32 result; 1496 int status, nr_io_queues; 1497 1498 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, 1499 &result); 1500 if (status < 0) 1501 return status; 1502 1503 /* 1504 * Degraded controllers might return an error when setting the queue 1505 * count. We still want to be able to bring them online and offer 1506 * access to the admin queue, as that might be only way to fix them up. 1507 */ 1508 if (status > 0) { 1509 dev_err(ctrl->device, "Could not set queue count (%d)\n", status); 1510 *count = 0; 1511 } else { 1512 nr_io_queues = min(result & 0xffff, result >> 16) + 1; 1513 *count = min(*count, nr_io_queues); 1514 } 1515 1516 return 0; 1517 } 1518 EXPORT_SYMBOL_GPL(nvme_set_queue_count); 1519 1520 #define NVME_AEN_SUPPORTED \ 1521 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \ 1522 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE) 1523 1524 static void nvme_enable_aen(struct nvme_ctrl *ctrl) 1525 { 1526 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; 1527 int status; 1528 1529 if (!supported_aens) 1530 return; 1531 1532 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, 1533 NULL, 0, &result); 1534 if (status) 1535 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", 1536 supported_aens); 1537 1538 queue_work(nvme_wq, &ctrl->async_event_work); 1539 } 1540 1541 static int nvme_ns_open(struct nvme_ns *ns) 1542 { 1543 1544 /* should never be called due to GENHD_FL_HIDDEN */ 1545 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head))) 1546 goto fail; 1547 if (!nvme_get_ns(ns)) 1548 goto fail; 1549 if (!try_module_get(ns->ctrl->ops->module)) 1550 goto fail_put_ns; 1551 1552 return 0; 1553 1554 fail_put_ns: 1555 nvme_put_ns(ns); 1556 fail: 1557 return -ENXIO; 1558 } 1559 1560 static void nvme_ns_release(struct nvme_ns *ns) 1561 { 1562 1563 module_put(ns->ctrl->ops->module); 1564 nvme_put_ns(ns); 1565 } 1566 1567 static int nvme_open(struct block_device *bdev, fmode_t mode) 1568 { 1569 return nvme_ns_open(bdev->bd_disk->private_data); 1570 } 1571 1572 static void nvme_release(struct gendisk *disk, fmode_t mode) 1573 { 1574 nvme_ns_release(disk->private_data); 1575 } 1576 1577 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) 1578 { 1579 /* some standard values */ 1580 geo->heads = 1 << 6; 1581 geo->sectors = 1 << 5; 1582 geo->cylinders = get_capacity(bdev->bd_disk) >> 11; 1583 return 0; 1584 } 1585 1586 #ifdef CONFIG_BLK_DEV_INTEGRITY 1587 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type, 1588 u32 max_integrity_segments) 1589 { 1590 struct blk_integrity integrity = { }; 1591 1592 switch (pi_type) { 1593 case NVME_NS_DPS_PI_TYPE3: 1594 integrity.profile = &t10_pi_type3_crc; 1595 integrity.tag_size = sizeof(u16) + sizeof(u32); 1596 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1597 break; 1598 case NVME_NS_DPS_PI_TYPE1: 1599 case NVME_NS_DPS_PI_TYPE2: 1600 integrity.profile = &t10_pi_type1_crc; 1601 integrity.tag_size = sizeof(u16); 1602 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1603 break; 1604 default: 1605 integrity.profile = NULL; 1606 break; 1607 } 1608 integrity.tuple_size = ms; 1609 blk_integrity_register(disk, &integrity); 1610 blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); 1611 } 1612 #else 1613 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type, 1614 u32 max_integrity_segments) 1615 { 1616 } 1617 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 1618 1619 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) 1620 { 1621 struct nvme_ctrl *ctrl = ns->ctrl; 1622 struct request_queue *queue = disk->queue; 1623 u32 size = queue_logical_block_size(queue); 1624 1625 if (ctrl->max_discard_sectors == 0) { 1626 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue); 1627 return; 1628 } 1629 1630 if (ctrl->nr_streams && ns->sws && ns->sgs) 1631 size *= ns->sws * ns->sgs; 1632 1633 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < 1634 NVME_DSM_MAX_RANGES); 1635 1636 queue->limits.discard_alignment = 0; 1637 queue->limits.discard_granularity = size; 1638 1639 /* If discard is already enabled, don't reset queue limits */ 1640 if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue)) 1641 return; 1642 1643 blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors); 1644 blk_queue_max_discard_segments(queue, ctrl->max_discard_segments); 1645 1646 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 1647 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); 1648 } 1649 1650 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) 1651 { 1652 return !uuid_is_null(&ids->uuid) || 1653 memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) || 1654 memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 1655 } 1656 1657 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) 1658 { 1659 return uuid_equal(&a->uuid, &b->uuid) && 1660 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && 1661 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 && 1662 a->csi == b->csi; 1663 } 1664 1665 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 1666 u32 *phys_bs, u32 *io_opt) 1667 { 1668 struct streams_directive_params s; 1669 int ret; 1670 1671 if (!ctrl->nr_streams) 1672 return 0; 1673 1674 ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); 1675 if (ret) 1676 return ret; 1677 1678 ns->sws = le32_to_cpu(s.sws); 1679 ns->sgs = le16_to_cpu(s.sgs); 1680 1681 if (ns->sws) { 1682 *phys_bs = ns->sws * (1 << ns->lba_shift); 1683 if (ns->sgs) 1684 *io_opt = *phys_bs * ns->sgs; 1685 } 1686 1687 return 0; 1688 } 1689 1690 static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) 1691 { 1692 struct nvme_ctrl *ctrl = ns->ctrl; 1693 1694 /* 1695 * The PI implementation requires the metadata size to be equal to the 1696 * t10 pi tuple size. 1697 */ 1698 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); 1699 if (ns->ms == sizeof(struct t10_pi_tuple)) 1700 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1701 else 1702 ns->pi_type = 0; 1703 1704 ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1705 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1706 return 0; 1707 if (ctrl->ops->flags & NVME_F_FABRICS) { 1708 /* 1709 * The NVMe over Fabrics specification only supports metadata as 1710 * part of the extended data LBA. We rely on HCA/HBA support to 1711 * remap the separate metadata buffer from the block layer. 1712 */ 1713 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) 1714 return -EINVAL; 1715 if (ctrl->max_integrity_segments) 1716 ns->features |= 1717 (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1718 } else { 1719 /* 1720 * For PCIe controllers, we can't easily remap the separate 1721 * metadata buffer from the block layer and thus require a 1722 * separate metadata buffer for block layer metadata/PI support. 1723 * We allow extended LBAs for the passthrough interface, though. 1724 */ 1725 if (id->flbas & NVME_NS_FLBAS_META_EXT) 1726 ns->features |= NVME_NS_EXT_LBAS; 1727 else 1728 ns->features |= NVME_NS_METADATA_SUPPORTED; 1729 } 1730 1731 return 0; 1732 } 1733 1734 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, 1735 struct request_queue *q) 1736 { 1737 bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; 1738 1739 if (ctrl->max_hw_sectors) { 1740 u32 max_segments = 1741 (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; 1742 1743 max_segments = min_not_zero(max_segments, ctrl->max_segments); 1744 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 1745 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 1746 } 1747 blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); 1748 blk_queue_dma_alignment(q, 7); 1749 blk_queue_write_cache(q, vwc, vwc); 1750 } 1751 1752 static void nvme_update_disk_info(struct gendisk *disk, 1753 struct nvme_ns *ns, struct nvme_id_ns *id) 1754 { 1755 sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); 1756 unsigned short bs = 1 << ns->lba_shift; 1757 u32 atomic_bs, phys_bs, io_opt = 0; 1758 1759 /* 1760 * The block layer can't support LBA sizes larger than the page size 1761 * yet, so catch this early and don't allow block I/O. 1762 */ 1763 if (ns->lba_shift > PAGE_SHIFT) { 1764 capacity = 0; 1765 bs = (1 << 9); 1766 } 1767 1768 blk_integrity_unregister(disk); 1769 1770 atomic_bs = phys_bs = bs; 1771 nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt); 1772 if (id->nabo == 0) { 1773 /* 1774 * Bit 1 indicates whether NAWUPF is defined for this namespace 1775 * and whether it should be used instead of AWUPF. If NAWUPF == 1776 * 0 then AWUPF must be used instead. 1777 */ 1778 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) 1779 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; 1780 else 1781 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; 1782 } 1783 1784 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { 1785 /* NPWG = Namespace Preferred Write Granularity */ 1786 phys_bs = bs * (1 + le16_to_cpu(id->npwg)); 1787 /* NOWS = Namespace Optimal Write Size */ 1788 io_opt = bs * (1 + le16_to_cpu(id->nows)); 1789 } 1790 1791 blk_queue_logical_block_size(disk->queue, bs); 1792 /* 1793 * Linux filesystems assume writing a single physical block is 1794 * an atomic operation. Hence limit the physical block size to the 1795 * value of the Atomic Write Unit Power Fail parameter. 1796 */ 1797 blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); 1798 blk_queue_io_min(disk->queue, phys_bs); 1799 blk_queue_io_opt(disk->queue, io_opt); 1800 1801 /* 1802 * Register a metadata profile for PI, or the plain non-integrity NVMe 1803 * metadata masquerading as Type 0 if supported, otherwise reject block 1804 * I/O to namespaces with metadata except when the namespace supports 1805 * PI, as it can strip/insert in that case. 1806 */ 1807 if (ns->ms) { 1808 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && 1809 (ns->features & NVME_NS_METADATA_SUPPORTED)) 1810 nvme_init_integrity(disk, ns->ms, ns->pi_type, 1811 ns->ctrl->max_integrity_segments); 1812 else if (!nvme_ns_has_pi(ns)) 1813 capacity = 0; 1814 } 1815 1816 set_capacity_and_notify(disk, capacity); 1817 1818 nvme_config_discard(disk, ns); 1819 blk_queue_max_write_zeroes_sectors(disk->queue, 1820 ns->ctrl->max_zeroes_sectors); 1821 1822 set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || 1823 test_bit(NVME_NS_FORCE_RO, &ns->flags)); 1824 } 1825 1826 static inline bool nvme_first_scan(struct gendisk *disk) 1827 { 1828 /* nvme_alloc_ns() scans the disk prior to adding it */ 1829 return !(disk->flags & GENHD_FL_UP); 1830 } 1831 1832 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) 1833 { 1834 struct nvme_ctrl *ctrl = ns->ctrl; 1835 u32 iob; 1836 1837 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && 1838 is_power_of_2(ctrl->max_hw_sectors)) 1839 iob = ctrl->max_hw_sectors; 1840 else 1841 iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); 1842 1843 if (!iob) 1844 return; 1845 1846 if (!is_power_of_2(iob)) { 1847 if (nvme_first_scan(ns->disk)) 1848 pr_warn("%s: ignoring unaligned IO boundary:%u\n", 1849 ns->disk->disk_name, iob); 1850 return; 1851 } 1852 1853 if (blk_queue_is_zoned(ns->disk->queue)) { 1854 if (nvme_first_scan(ns->disk)) 1855 pr_warn("%s: ignoring zoned namespace IO boundary\n", 1856 ns->disk->disk_name); 1857 return; 1858 } 1859 1860 blk_queue_chunk_sectors(ns->queue, iob); 1861 } 1862 1863 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) 1864 { 1865 unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; 1866 int ret; 1867 1868 blk_mq_freeze_queue(ns->disk->queue); 1869 ns->lba_shift = id->lbaf[lbaf].ds; 1870 nvme_set_queue_limits(ns->ctrl, ns->queue); 1871 1872 ret = nvme_configure_metadata(ns, id); 1873 if (ret) 1874 goto out_unfreeze; 1875 nvme_set_chunk_sectors(ns, id); 1876 nvme_update_disk_info(ns->disk, ns, id); 1877 1878 if (ns->head->ids.csi == NVME_CSI_ZNS) { 1879 ret = nvme_update_zone_info(ns, lbaf); 1880 if (ret) 1881 goto out_unfreeze; 1882 } 1883 1884 blk_mq_unfreeze_queue(ns->disk->queue); 1885 1886 if (blk_queue_is_zoned(ns->queue)) { 1887 ret = nvme_revalidate_zones(ns); 1888 if (ret && !nvme_first_scan(ns->disk)) 1889 goto out; 1890 } 1891 1892 if (nvme_ns_head_multipath(ns->head)) { 1893 blk_mq_freeze_queue(ns->head->disk->queue); 1894 nvme_update_disk_info(ns->head->disk, ns, id); 1895 blk_stack_limits(&ns->head->disk->queue->limits, 1896 &ns->queue->limits, 0); 1897 blk_queue_update_readahead(ns->head->disk->queue); 1898 blk_mq_unfreeze_queue(ns->head->disk->queue); 1899 } 1900 return 0; 1901 1902 out_unfreeze: 1903 blk_mq_unfreeze_queue(ns->disk->queue); 1904 out: 1905 /* 1906 * If probing fails due an unsupported feature, hide the block device, 1907 * but still allow other access. 1908 */ 1909 if (ret == -ENODEV) { 1910 ns->disk->flags |= GENHD_FL_HIDDEN; 1911 ret = 0; 1912 } 1913 return ret; 1914 } 1915 1916 static char nvme_pr_type(enum pr_type type) 1917 { 1918 switch (type) { 1919 case PR_WRITE_EXCLUSIVE: 1920 return 1; 1921 case PR_EXCLUSIVE_ACCESS: 1922 return 2; 1923 case PR_WRITE_EXCLUSIVE_REG_ONLY: 1924 return 3; 1925 case PR_EXCLUSIVE_ACCESS_REG_ONLY: 1926 return 4; 1927 case PR_WRITE_EXCLUSIVE_ALL_REGS: 1928 return 5; 1929 case PR_EXCLUSIVE_ACCESS_ALL_REGS: 1930 return 6; 1931 default: 1932 return 0; 1933 } 1934 }; 1935 1936 static int nvme_send_ns_head_pr_command(struct block_device *bdev, 1937 struct nvme_command *c, u8 data[16]) 1938 { 1939 struct nvme_ns_head *head = bdev->bd_disk->private_data; 1940 int srcu_idx = srcu_read_lock(&head->srcu); 1941 struct nvme_ns *ns = nvme_find_path(head); 1942 int ret = -EWOULDBLOCK; 1943 1944 if (ns) { 1945 c->common.nsid = cpu_to_le32(ns->head->ns_id); 1946 ret = nvme_submit_sync_cmd(ns->queue, c, data, 16); 1947 } 1948 srcu_read_unlock(&head->srcu, srcu_idx); 1949 return ret; 1950 } 1951 1952 static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, 1953 u8 data[16]) 1954 { 1955 c->common.nsid = cpu_to_le32(ns->head->ns_id); 1956 return nvme_submit_sync_cmd(ns->queue, c, data, 16); 1957 } 1958 1959 static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 1960 u64 key, u64 sa_key, u8 op) 1961 { 1962 struct nvme_command c = { }; 1963 u8 data[16] = { 0, }; 1964 1965 put_unaligned_le64(key, &data[0]); 1966 put_unaligned_le64(sa_key, &data[8]); 1967 1968 c.common.opcode = op; 1969 c.common.cdw10 = cpu_to_le32(cdw10); 1970 1971 if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && 1972 bdev->bd_disk->fops == &nvme_ns_head_ops) 1973 return nvme_send_ns_head_pr_command(bdev, &c, data); 1974 return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data); 1975 } 1976 1977 static int nvme_pr_register(struct block_device *bdev, u64 old, 1978 u64 new, unsigned flags) 1979 { 1980 u32 cdw10; 1981 1982 if (flags & ~PR_FL_IGNORE_KEY) 1983 return -EOPNOTSUPP; 1984 1985 cdw10 = old ? 2 : 0; 1986 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; 1987 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 1988 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 1989 } 1990 1991 static int nvme_pr_reserve(struct block_device *bdev, u64 key, 1992 enum pr_type type, unsigned flags) 1993 { 1994 u32 cdw10; 1995 1996 if (flags & ~PR_FL_IGNORE_KEY) 1997 return -EOPNOTSUPP; 1998 1999 cdw10 = nvme_pr_type(type) << 8; 2000 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); 2001 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); 2002 } 2003 2004 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, 2005 enum pr_type type, bool abort) 2006 { 2007 u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); 2008 2009 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); 2010 } 2011 2012 static int nvme_pr_clear(struct block_device *bdev, u64 key) 2013 { 2014 u32 cdw10 = 1 | (key ? 1 << 3 : 0); 2015 2016 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); 2017 } 2018 2019 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 2020 { 2021 u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); 2022 2023 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2024 } 2025 2026 const struct pr_ops nvme_pr_ops = { 2027 .pr_register = nvme_pr_register, 2028 .pr_reserve = nvme_pr_reserve, 2029 .pr_release = nvme_pr_release, 2030 .pr_preempt = nvme_pr_preempt, 2031 .pr_clear = nvme_pr_clear, 2032 }; 2033 2034 #ifdef CONFIG_BLK_SED_OPAL 2035 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2036 bool send) 2037 { 2038 struct nvme_ctrl *ctrl = data; 2039 struct nvme_command cmd = { }; 2040 2041 if (send) 2042 cmd.common.opcode = nvme_admin_security_send; 2043 else 2044 cmd.common.opcode = nvme_admin_security_recv; 2045 cmd.common.nsid = 0; 2046 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2047 cmd.common.cdw11 = cpu_to_le32(len); 2048 2049 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0, 2050 NVME_QID_ANY, 1, 0, false); 2051 } 2052 EXPORT_SYMBOL_GPL(nvme_sec_submit); 2053 #endif /* CONFIG_BLK_SED_OPAL */ 2054 2055 #ifdef CONFIG_BLK_DEV_ZONED 2056 static int nvme_report_zones(struct gendisk *disk, sector_t sector, 2057 unsigned int nr_zones, report_zones_cb cb, void *data) 2058 { 2059 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, 2060 data); 2061 } 2062 #else 2063 #define nvme_report_zones NULL 2064 #endif /* CONFIG_BLK_DEV_ZONED */ 2065 2066 static const struct block_device_operations nvme_bdev_ops = { 2067 .owner = THIS_MODULE, 2068 .ioctl = nvme_ioctl, 2069 .open = nvme_open, 2070 .release = nvme_release, 2071 .getgeo = nvme_getgeo, 2072 .report_zones = nvme_report_zones, 2073 .pr_ops = &nvme_pr_ops, 2074 }; 2075 2076 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) 2077 { 2078 unsigned long timeout = 2079 ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; 2080 u32 csts, bit = enabled ? NVME_CSTS_RDY : 0; 2081 int ret; 2082 2083 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2084 if (csts == ~0) 2085 return -ENODEV; 2086 if ((csts & NVME_CSTS_RDY) == bit) 2087 break; 2088 2089 usleep_range(1000, 2000); 2090 if (fatal_signal_pending(current)) 2091 return -EINTR; 2092 if (time_after(jiffies, timeout)) { 2093 dev_err(ctrl->device, 2094 "Device not ready; aborting %s, CSTS=0x%x\n", 2095 enabled ? "initialisation" : "reset", csts); 2096 return -ENODEV; 2097 } 2098 } 2099 2100 return ret; 2101 } 2102 2103 /* 2104 * If the device has been passed off to us in an enabled state, just clear 2105 * the enabled bit. The spec says we should set the 'shutdown notification 2106 * bits', but doing so may cause the device to complete commands to the 2107 * admin queue ... and we don't know what memory that might be pointing at! 2108 */ 2109 int nvme_disable_ctrl(struct nvme_ctrl *ctrl) 2110 { 2111 int ret; 2112 2113 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2114 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2115 2116 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2117 if (ret) 2118 return ret; 2119 2120 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2121 msleep(NVME_QUIRK_DELAY_AMOUNT); 2122 2123 return nvme_wait_ready(ctrl, ctrl->cap, false); 2124 } 2125 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2126 2127 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2128 { 2129 unsigned dev_page_min; 2130 int ret; 2131 2132 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2133 if (ret) { 2134 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2135 return ret; 2136 } 2137 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2138 2139 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { 2140 dev_err(ctrl->device, 2141 "Minimum device page size %u too large for host (%u)\n", 2142 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); 2143 return -ENODEV; 2144 } 2145 2146 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) 2147 ctrl->ctrl_config = NVME_CC_CSS_CSI; 2148 else 2149 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2150 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 2151 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2152 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2153 ctrl->ctrl_config |= NVME_CC_ENABLE; 2154 2155 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2156 if (ret) 2157 return ret; 2158 return nvme_wait_ready(ctrl, ctrl->cap, true); 2159 } 2160 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2161 2162 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) 2163 { 2164 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); 2165 u32 csts; 2166 int ret; 2167 2168 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2169 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2170 2171 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2172 if (ret) 2173 return ret; 2174 2175 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2176 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) 2177 break; 2178 2179 msleep(100); 2180 if (fatal_signal_pending(current)) 2181 return -EINTR; 2182 if (time_after(jiffies, timeout)) { 2183 dev_err(ctrl->device, 2184 "Device shutdown incomplete; abort shutdown\n"); 2185 return -ENODEV; 2186 } 2187 } 2188 2189 return ret; 2190 } 2191 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); 2192 2193 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2194 { 2195 __le64 ts; 2196 int ret; 2197 2198 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2199 return 0; 2200 2201 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2202 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2203 NULL); 2204 if (ret) 2205 dev_warn_once(ctrl->device, 2206 "could not set timestamp (%d)\n", ret); 2207 return ret; 2208 } 2209 2210 static int nvme_configure_acre(struct nvme_ctrl *ctrl) 2211 { 2212 struct nvme_feat_host_behavior *host; 2213 int ret; 2214 2215 /* Don't bother enabling the feature if retry delay is not reported */ 2216 if (!ctrl->crdt[0]) 2217 return 0; 2218 2219 host = kzalloc(sizeof(*host), GFP_KERNEL); 2220 if (!host) 2221 return 0; 2222 2223 host->acre = NVME_ENABLE_ACRE; 2224 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2225 host, sizeof(*host), NULL); 2226 kfree(host); 2227 return ret; 2228 } 2229 2230 /* 2231 * The function checks whether the given total (exlat + enlat) latency of 2232 * a power state allows the latter to be used as an APST transition target. 2233 * It does so by comparing the latency to the primary and secondary latency 2234 * tolerances defined by module params. If there's a match, the corresponding 2235 * timeout value is returned and the matching tolerance index (1 or 2) is 2236 * reported. 2237 */ 2238 static bool nvme_apst_get_transition_time(u64 total_latency, 2239 u64 *transition_time, unsigned *last_index) 2240 { 2241 if (total_latency <= apst_primary_latency_tol_us) { 2242 if (*last_index == 1) 2243 return false; 2244 *last_index = 1; 2245 *transition_time = apst_primary_timeout_ms; 2246 return true; 2247 } 2248 if (apst_secondary_timeout_ms && 2249 total_latency <= apst_secondary_latency_tol_us) { 2250 if (*last_index <= 2) 2251 return false; 2252 *last_index = 2; 2253 *transition_time = apst_secondary_timeout_ms; 2254 return true; 2255 } 2256 return false; 2257 } 2258 2259 /* 2260 * APST (Autonomous Power State Transition) lets us program a table of power 2261 * state transitions that the controller will perform automatically. 2262 * 2263 * Depending on module params, one of the two supported techniques will be used: 2264 * 2265 * - If the parameters provide explicit timeouts and tolerances, they will be 2266 * used to build a table with up to 2 non-operational states to transition to. 2267 * The default parameter values were selected based on the values used by 2268 * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic 2269 * regeneration of the APST table in the event of switching between external 2270 * and battery power, the timeouts and tolerances reflect a compromise 2271 * between values used by Microsoft for AC and battery scenarios. 2272 * - If not, we'll configure the table with a simple heuristic: we are willing 2273 * to spend at most 2% of the time transitioning between power states. 2274 * Therefore, when running in any given state, we will enter the next 2275 * lower-power non-operational state after waiting 50 * (enlat + exlat) 2276 * microseconds, as long as that state's exit latency is under the requested 2277 * maximum latency. 2278 * 2279 * We will not autonomously enter any non-operational state for which the total 2280 * latency exceeds ps_max_latency_us. 2281 * 2282 * Users can set ps_max_latency_us to zero to turn off APST. 2283 */ 2284 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2285 { 2286 struct nvme_feat_auto_pst *table; 2287 unsigned apste = 0; 2288 u64 max_lat_us = 0; 2289 __le64 target = 0; 2290 int max_ps = -1; 2291 int state; 2292 int ret; 2293 unsigned last_lt_index = UINT_MAX; 2294 2295 /* 2296 * If APST isn't supported or if we haven't been initialized yet, 2297 * then don't do anything. 2298 */ 2299 if (!ctrl->apsta) 2300 return 0; 2301 2302 if (ctrl->npss > 31) { 2303 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2304 return 0; 2305 } 2306 2307 table = kzalloc(sizeof(*table), GFP_KERNEL); 2308 if (!table) 2309 return 0; 2310 2311 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2312 /* Turn off APST. */ 2313 dev_dbg(ctrl->device, "APST disabled\n"); 2314 goto done; 2315 } 2316 2317 /* 2318 * Walk through all states from lowest- to highest-power. 2319 * According to the spec, lower-numbered states use more power. NPSS, 2320 * despite the name, is the index of the lowest-power state, not the 2321 * number of states. 2322 */ 2323 for (state = (int)ctrl->npss; state >= 0; state--) { 2324 u64 total_latency_us, exit_latency_us, transition_ms; 2325 2326 if (target) 2327 table->entries[state] = target; 2328 2329 /* 2330 * Don't allow transitions to the deepest state if it's quirked 2331 * off. 2332 */ 2333 if (state == ctrl->npss && 2334 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2335 continue; 2336 2337 /* 2338 * Is this state a useful non-operational state for higher-power 2339 * states to autonomously transition to? 2340 */ 2341 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) 2342 continue; 2343 2344 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2345 if (exit_latency_us > ctrl->ps_max_latency_us) 2346 continue; 2347 2348 total_latency_us = exit_latency_us + 2349 le32_to_cpu(ctrl->psd[state].entry_lat); 2350 2351 /* 2352 * This state is good. It can be used as the APST idle target 2353 * for higher power states. 2354 */ 2355 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { 2356 if (!nvme_apst_get_transition_time(total_latency_us, 2357 &transition_ms, &last_lt_index)) 2358 continue; 2359 } else { 2360 transition_ms = total_latency_us + 19; 2361 do_div(transition_ms, 20); 2362 if (transition_ms > (1 << 24) - 1) 2363 transition_ms = (1 << 24) - 1; 2364 } 2365 2366 target = cpu_to_le64((state << 3) | (transition_ms << 8)); 2367 if (max_ps == -1) 2368 max_ps = state; 2369 if (total_latency_us > max_lat_us) 2370 max_lat_us = total_latency_us; 2371 } 2372 2373 if (max_ps == -1) 2374 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2375 else 2376 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2377 max_ps, max_lat_us, (int)sizeof(*table), table); 2378 apste = 1; 2379 2380 done: 2381 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2382 table, sizeof(*table), NULL); 2383 if (ret) 2384 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2385 kfree(table); 2386 return ret; 2387 } 2388 2389 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2390 { 2391 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2392 u64 latency; 2393 2394 switch (val) { 2395 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2396 case PM_QOS_LATENCY_ANY: 2397 latency = U64_MAX; 2398 break; 2399 2400 default: 2401 latency = val; 2402 } 2403 2404 if (ctrl->ps_max_latency_us != latency) { 2405 ctrl->ps_max_latency_us = latency; 2406 if (ctrl->state == NVME_CTRL_LIVE) 2407 nvme_configure_apst(ctrl); 2408 } 2409 } 2410 2411 struct nvme_core_quirk_entry { 2412 /* 2413 * NVMe model and firmware strings are padded with spaces. For 2414 * simplicity, strings in the quirk table are padded with NULLs 2415 * instead. 2416 */ 2417 u16 vid; 2418 const char *mn; 2419 const char *fr; 2420 unsigned long quirks; 2421 }; 2422 2423 static const struct nvme_core_quirk_entry core_quirks[] = { 2424 { 2425 /* 2426 * This Toshiba device seems to die using any APST states. See: 2427 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2428 */ 2429 .vid = 0x1179, 2430 .mn = "THNSF5256GPUK TOSHIBA", 2431 .quirks = NVME_QUIRK_NO_APST, 2432 }, 2433 { 2434 /* 2435 * This LiteON CL1-3D*-Q11 firmware version has a race 2436 * condition associated with actions related to suspend to idle 2437 * LiteON has resolved the problem in future firmware 2438 */ 2439 .vid = 0x14a4, 2440 .fr = "22301111", 2441 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2442 } 2443 }; 2444 2445 /* match is null-terminated but idstr is space-padded. */ 2446 static bool string_matches(const char *idstr, const char *match, size_t len) 2447 { 2448 size_t matchlen; 2449 2450 if (!match) 2451 return true; 2452 2453 matchlen = strlen(match); 2454 WARN_ON_ONCE(matchlen > len); 2455 2456 if (memcmp(idstr, match, matchlen)) 2457 return false; 2458 2459 for (; matchlen < len; matchlen++) 2460 if (idstr[matchlen] != ' ') 2461 return false; 2462 2463 return true; 2464 } 2465 2466 static bool quirk_matches(const struct nvme_id_ctrl *id, 2467 const struct nvme_core_quirk_entry *q) 2468 { 2469 return q->vid == le16_to_cpu(id->vid) && 2470 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2471 string_matches(id->fr, q->fr, sizeof(id->fr)); 2472 } 2473 2474 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2475 struct nvme_id_ctrl *id) 2476 { 2477 size_t nqnlen; 2478 int off; 2479 2480 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2481 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2482 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2483 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2484 return; 2485 } 2486 2487 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2488 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2489 } 2490 2491 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2492 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2493 "nqn.2014.08.org.nvmexpress:%04x%04x", 2494 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2495 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2496 off += sizeof(id->sn); 2497 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2498 off += sizeof(id->mn); 2499 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2500 } 2501 2502 static void nvme_release_subsystem(struct device *dev) 2503 { 2504 struct nvme_subsystem *subsys = 2505 container_of(dev, struct nvme_subsystem, dev); 2506 2507 if (subsys->instance >= 0) 2508 ida_simple_remove(&nvme_instance_ida, subsys->instance); 2509 kfree(subsys); 2510 } 2511 2512 static void nvme_destroy_subsystem(struct kref *ref) 2513 { 2514 struct nvme_subsystem *subsys = 2515 container_of(ref, struct nvme_subsystem, ref); 2516 2517 mutex_lock(&nvme_subsystems_lock); 2518 list_del(&subsys->entry); 2519 mutex_unlock(&nvme_subsystems_lock); 2520 2521 ida_destroy(&subsys->ns_ida); 2522 device_del(&subsys->dev); 2523 put_device(&subsys->dev); 2524 } 2525 2526 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2527 { 2528 kref_put(&subsys->ref, nvme_destroy_subsystem); 2529 } 2530 2531 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2532 { 2533 struct nvme_subsystem *subsys; 2534 2535 lockdep_assert_held(&nvme_subsystems_lock); 2536 2537 /* 2538 * Fail matches for discovery subsystems. This results 2539 * in each discovery controller bound to a unique subsystem. 2540 * This avoids issues with validating controller values 2541 * that can only be true when there is a single unique subsystem. 2542 * There may be multiple and completely independent entities 2543 * that provide discovery controllers. 2544 */ 2545 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2546 return NULL; 2547 2548 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2549 if (strcmp(subsys->subnqn, subsysnqn)) 2550 continue; 2551 if (!kref_get_unless_zero(&subsys->ref)) 2552 continue; 2553 return subsys; 2554 } 2555 2556 return NULL; 2557 } 2558 2559 #define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2560 struct device_attribute subsys_attr_##_name = \ 2561 __ATTR(_name, _mode, _show, NULL) 2562 2563 static ssize_t nvme_subsys_show_nqn(struct device *dev, 2564 struct device_attribute *attr, 2565 char *buf) 2566 { 2567 struct nvme_subsystem *subsys = 2568 container_of(dev, struct nvme_subsystem, dev); 2569 2570 return sysfs_emit(buf, "%s\n", subsys->subnqn); 2571 } 2572 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2573 2574 #define nvme_subsys_show_str_function(field) \ 2575 static ssize_t subsys_##field##_show(struct device *dev, \ 2576 struct device_attribute *attr, char *buf) \ 2577 { \ 2578 struct nvme_subsystem *subsys = \ 2579 container_of(dev, struct nvme_subsystem, dev); \ 2580 return sysfs_emit(buf, "%.*s\n", \ 2581 (int)sizeof(subsys->field), subsys->field); \ 2582 } \ 2583 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2584 2585 nvme_subsys_show_str_function(model); 2586 nvme_subsys_show_str_function(serial); 2587 nvme_subsys_show_str_function(firmware_rev); 2588 2589 static struct attribute *nvme_subsys_attrs[] = { 2590 &subsys_attr_model.attr, 2591 &subsys_attr_serial.attr, 2592 &subsys_attr_firmware_rev.attr, 2593 &subsys_attr_subsysnqn.attr, 2594 #ifdef CONFIG_NVME_MULTIPATH 2595 &subsys_attr_iopolicy.attr, 2596 #endif 2597 NULL, 2598 }; 2599 2600 static const struct attribute_group nvme_subsys_attrs_group = { 2601 .attrs = nvme_subsys_attrs, 2602 }; 2603 2604 static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2605 &nvme_subsys_attrs_group, 2606 NULL, 2607 }; 2608 2609 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) 2610 { 2611 return ctrl->opts && ctrl->opts->discovery_nqn; 2612 } 2613 2614 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2615 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2616 { 2617 struct nvme_ctrl *tmp; 2618 2619 lockdep_assert_held(&nvme_subsystems_lock); 2620 2621 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2622 if (nvme_state_terminal(tmp)) 2623 continue; 2624 2625 if (tmp->cntlid == ctrl->cntlid) { 2626 dev_err(ctrl->device, 2627 "Duplicate cntlid %u with %s, rejecting\n", 2628 ctrl->cntlid, dev_name(tmp->device)); 2629 return false; 2630 } 2631 2632 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 2633 nvme_discovery_ctrl(ctrl)) 2634 continue; 2635 2636 dev_err(ctrl->device, 2637 "Subsystem does not support multiple controllers\n"); 2638 return false; 2639 } 2640 2641 return true; 2642 } 2643 2644 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2645 { 2646 struct nvme_subsystem *subsys, *found; 2647 int ret; 2648 2649 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2650 if (!subsys) 2651 return -ENOMEM; 2652 2653 subsys->instance = -1; 2654 mutex_init(&subsys->lock); 2655 kref_init(&subsys->ref); 2656 INIT_LIST_HEAD(&subsys->ctrls); 2657 INIT_LIST_HEAD(&subsys->nsheads); 2658 nvme_init_subnqn(subsys, ctrl, id); 2659 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2660 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2661 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); 2662 subsys->vendor_id = le16_to_cpu(id->vid); 2663 subsys->cmic = id->cmic; 2664 subsys->awupf = le16_to_cpu(id->awupf); 2665 #ifdef CONFIG_NVME_MULTIPATH 2666 subsys->iopolicy = NVME_IOPOLICY_NUMA; 2667 #endif 2668 2669 subsys->dev.class = nvme_subsys_class; 2670 subsys->dev.release = nvme_release_subsystem; 2671 subsys->dev.groups = nvme_subsys_attrs_groups; 2672 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 2673 device_initialize(&subsys->dev); 2674 2675 mutex_lock(&nvme_subsystems_lock); 2676 found = __nvme_find_get_subsystem(subsys->subnqn); 2677 if (found) { 2678 put_device(&subsys->dev); 2679 subsys = found; 2680 2681 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2682 ret = -EINVAL; 2683 goto out_put_subsystem; 2684 } 2685 } else { 2686 ret = device_add(&subsys->dev); 2687 if (ret) { 2688 dev_err(ctrl->device, 2689 "failed to register subsystem device.\n"); 2690 put_device(&subsys->dev); 2691 goto out_unlock; 2692 } 2693 ida_init(&subsys->ns_ida); 2694 list_add_tail(&subsys->entry, &nvme_subsystems); 2695 } 2696 2697 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2698 dev_name(ctrl->device)); 2699 if (ret) { 2700 dev_err(ctrl->device, 2701 "failed to create sysfs link from subsystem.\n"); 2702 goto out_put_subsystem; 2703 } 2704 2705 if (!found) 2706 subsys->instance = ctrl->instance; 2707 ctrl->subsys = subsys; 2708 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2709 mutex_unlock(&nvme_subsystems_lock); 2710 return 0; 2711 2712 out_put_subsystem: 2713 nvme_put_subsystem(subsys); 2714 out_unlock: 2715 mutex_unlock(&nvme_subsystems_lock); 2716 return ret; 2717 } 2718 2719 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, 2720 void *log, size_t size, u64 offset) 2721 { 2722 struct nvme_command c = { }; 2723 u32 dwlen = nvme_bytes_to_numd(size); 2724 2725 c.get_log_page.opcode = nvme_admin_get_log_page; 2726 c.get_log_page.nsid = cpu_to_le32(nsid); 2727 c.get_log_page.lid = log_page; 2728 c.get_log_page.lsp = lsp; 2729 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2730 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2731 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2732 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2733 c.get_log_page.csi = csi; 2734 2735 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2736 } 2737 2738 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, 2739 struct nvme_effects_log **log) 2740 { 2741 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); 2742 int ret; 2743 2744 if (cel) 2745 goto out; 2746 2747 cel = kzalloc(sizeof(*cel), GFP_KERNEL); 2748 if (!cel) 2749 return -ENOMEM; 2750 2751 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, 2752 cel, sizeof(*cel), 0); 2753 if (ret) { 2754 kfree(cel); 2755 return ret; 2756 } 2757 2758 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); 2759 out: 2760 *log = cel; 2761 return 0; 2762 } 2763 2764 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units) 2765 { 2766 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; 2767 2768 if (check_shl_overflow(1U, units + page_shift - 9, &val)) 2769 return UINT_MAX; 2770 return val; 2771 } 2772 2773 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) 2774 { 2775 struct nvme_command c = { }; 2776 struct nvme_id_ctrl_nvm *id; 2777 int ret; 2778 2779 if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { 2780 ctrl->max_discard_sectors = UINT_MAX; 2781 ctrl->max_discard_segments = NVME_DSM_MAX_RANGES; 2782 } else { 2783 ctrl->max_discard_sectors = 0; 2784 ctrl->max_discard_segments = 0; 2785 } 2786 2787 /* 2788 * Even though NVMe spec explicitly states that MDTS is not applicable 2789 * to the write-zeroes, we are cautious and limit the size to the 2790 * controllers max_hw_sectors value, which is based on the MDTS field 2791 * and possibly other limiting factors. 2792 */ 2793 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && 2794 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) 2795 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors; 2796 else 2797 ctrl->max_zeroes_sectors = 0; 2798 2799 if (nvme_ctrl_limited_cns(ctrl)) 2800 return 0; 2801 2802 id = kzalloc(sizeof(*id), GFP_KERNEL); 2803 if (!id) 2804 return 0; 2805 2806 c.identify.opcode = nvme_admin_identify; 2807 c.identify.cns = NVME_ID_CNS_CS_CTRL; 2808 c.identify.csi = NVME_CSI_NVM; 2809 2810 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 2811 if (ret) 2812 goto free_data; 2813 2814 if (id->dmrl) 2815 ctrl->max_discard_segments = id->dmrl; 2816 if (id->dmrsl) 2817 ctrl->max_discard_sectors = le32_to_cpu(id->dmrsl); 2818 if (id->wzsl) 2819 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); 2820 2821 free_data: 2822 kfree(id); 2823 return ret; 2824 } 2825 2826 static int nvme_init_identify(struct nvme_ctrl *ctrl) 2827 { 2828 struct nvme_id_ctrl *id; 2829 u32 max_hw_sectors; 2830 bool prev_apst_enabled; 2831 int ret; 2832 2833 ret = nvme_identify_ctrl(ctrl, &id); 2834 if (ret) { 2835 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 2836 return -EIO; 2837 } 2838 2839 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 2840 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); 2841 if (ret < 0) 2842 goto out_free; 2843 } 2844 2845 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 2846 ctrl->cntlid = le16_to_cpu(id->cntlid); 2847 2848 if (!ctrl->identified) { 2849 unsigned int i; 2850 2851 ret = nvme_init_subsystem(ctrl, id); 2852 if (ret) 2853 goto out_free; 2854 2855 /* 2856 * Check for quirks. Quirk can depend on firmware version, 2857 * so, in principle, the set of quirks present can change 2858 * across a reset. As a possible future enhancement, we 2859 * could re-scan for quirks every time we reinitialize 2860 * the device, but we'd have to make sure that the driver 2861 * behaves intelligently if the quirks change. 2862 */ 2863 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 2864 if (quirk_matches(id, &core_quirks[i])) 2865 ctrl->quirks |= core_quirks[i].quirks; 2866 } 2867 } 2868 2869 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 2870 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 2871 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 2872 } 2873 2874 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 2875 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 2876 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 2877 2878 ctrl->oacs = le16_to_cpu(id->oacs); 2879 ctrl->oncs = le16_to_cpu(id->oncs); 2880 ctrl->mtfa = le16_to_cpu(id->mtfa); 2881 ctrl->oaes = le32_to_cpu(id->oaes); 2882 ctrl->wctemp = le16_to_cpu(id->wctemp); 2883 ctrl->cctemp = le16_to_cpu(id->cctemp); 2884 2885 atomic_set(&ctrl->abort_limit, id->acl + 1); 2886 ctrl->vwc = id->vwc; 2887 if (id->mdts) 2888 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); 2889 else 2890 max_hw_sectors = UINT_MAX; 2891 ctrl->max_hw_sectors = 2892 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 2893 2894 nvme_set_queue_limits(ctrl, ctrl->admin_q); 2895 ctrl->sgls = le32_to_cpu(id->sgls); 2896 ctrl->kas = le16_to_cpu(id->kas); 2897 ctrl->max_namespaces = le32_to_cpu(id->mnan); 2898 ctrl->ctratt = le32_to_cpu(id->ctratt); 2899 2900 if (id->rtd3e) { 2901 /* us -> s */ 2902 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; 2903 2904 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 2905 shutdown_timeout, 60); 2906 2907 if (ctrl->shutdown_timeout != shutdown_timeout) 2908 dev_info(ctrl->device, 2909 "Shutdown timeout set to %u seconds\n", 2910 ctrl->shutdown_timeout); 2911 } else 2912 ctrl->shutdown_timeout = shutdown_timeout; 2913 2914 ctrl->npss = id->npss; 2915 ctrl->apsta = id->apsta; 2916 prev_apst_enabled = ctrl->apst_enabled; 2917 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 2918 if (force_apst && id->apsta) { 2919 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 2920 ctrl->apst_enabled = true; 2921 } else { 2922 ctrl->apst_enabled = false; 2923 } 2924 } else { 2925 ctrl->apst_enabled = id->apsta; 2926 } 2927 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 2928 2929 if (ctrl->ops->flags & NVME_F_FABRICS) { 2930 ctrl->icdoff = le16_to_cpu(id->icdoff); 2931 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 2932 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 2933 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 2934 2935 /* 2936 * In fabrics we need to verify the cntlid matches the 2937 * admin connect 2938 */ 2939 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 2940 dev_err(ctrl->device, 2941 "Mismatching cntlid: Connect %u vs Identify " 2942 "%u, rejecting\n", 2943 ctrl->cntlid, le16_to_cpu(id->cntlid)); 2944 ret = -EINVAL; 2945 goto out_free; 2946 } 2947 2948 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { 2949 dev_err(ctrl->device, 2950 "keep-alive support is mandatory for fabrics\n"); 2951 ret = -EINVAL; 2952 goto out_free; 2953 } 2954 } else { 2955 ctrl->hmpre = le32_to_cpu(id->hmpre); 2956 ctrl->hmmin = le32_to_cpu(id->hmmin); 2957 ctrl->hmminds = le32_to_cpu(id->hmminds); 2958 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 2959 } 2960 2961 ret = nvme_mpath_init_identify(ctrl, id); 2962 if (ret < 0) 2963 goto out_free; 2964 2965 if (ctrl->apst_enabled && !prev_apst_enabled) 2966 dev_pm_qos_expose_latency_tolerance(ctrl->device); 2967 else if (!ctrl->apst_enabled && prev_apst_enabled) 2968 dev_pm_qos_hide_latency_tolerance(ctrl->device); 2969 2970 out_free: 2971 kfree(id); 2972 return ret; 2973 } 2974 2975 /* 2976 * Initialize the cached copies of the Identify data and various controller 2977 * register in our nvme_ctrl structure. This should be called as soon as 2978 * the admin queue is fully up and running. 2979 */ 2980 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl) 2981 { 2982 int ret; 2983 2984 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 2985 if (ret) { 2986 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 2987 return ret; 2988 } 2989 2990 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 2991 2992 if (ctrl->vs >= NVME_VS(1, 1, 0)) 2993 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 2994 2995 ret = nvme_init_identify(ctrl); 2996 if (ret) 2997 return ret; 2998 2999 ret = nvme_init_non_mdts_limits(ctrl); 3000 if (ret < 0) 3001 return ret; 3002 3003 ret = nvme_configure_apst(ctrl); 3004 if (ret < 0) 3005 return ret; 3006 3007 ret = nvme_configure_timestamp(ctrl); 3008 if (ret < 0) 3009 return ret; 3010 3011 ret = nvme_configure_directives(ctrl); 3012 if (ret < 0) 3013 return ret; 3014 3015 ret = nvme_configure_acre(ctrl); 3016 if (ret < 0) 3017 return ret; 3018 3019 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { 3020 ret = nvme_hwmon_init(ctrl); 3021 if (ret < 0) 3022 return ret; 3023 } 3024 3025 ctrl->identified = true; 3026 3027 return 0; 3028 } 3029 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); 3030 3031 static int nvme_dev_open(struct inode *inode, struct file *file) 3032 { 3033 struct nvme_ctrl *ctrl = 3034 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3035 3036 switch (ctrl->state) { 3037 case NVME_CTRL_LIVE: 3038 break; 3039 default: 3040 return -EWOULDBLOCK; 3041 } 3042 3043 nvme_get_ctrl(ctrl); 3044 if (!try_module_get(ctrl->ops->module)) { 3045 nvme_put_ctrl(ctrl); 3046 return -EINVAL; 3047 } 3048 3049 file->private_data = ctrl; 3050 return 0; 3051 } 3052 3053 static int nvme_dev_release(struct inode *inode, struct file *file) 3054 { 3055 struct nvme_ctrl *ctrl = 3056 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3057 3058 module_put(ctrl->ops->module); 3059 nvme_put_ctrl(ctrl); 3060 return 0; 3061 } 3062 3063 static const struct file_operations nvme_dev_fops = { 3064 .owner = THIS_MODULE, 3065 .open = nvme_dev_open, 3066 .release = nvme_dev_release, 3067 .unlocked_ioctl = nvme_dev_ioctl, 3068 .compat_ioctl = compat_ptr_ioctl, 3069 }; 3070 3071 static ssize_t nvme_sysfs_reset(struct device *dev, 3072 struct device_attribute *attr, const char *buf, 3073 size_t count) 3074 { 3075 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3076 int ret; 3077 3078 ret = nvme_reset_ctrl_sync(ctrl); 3079 if (ret < 0) 3080 return ret; 3081 return count; 3082 } 3083 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 3084 3085 static ssize_t nvme_sysfs_rescan(struct device *dev, 3086 struct device_attribute *attr, const char *buf, 3087 size_t count) 3088 { 3089 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3090 3091 nvme_queue_scan(ctrl); 3092 return count; 3093 } 3094 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 3095 3096 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 3097 { 3098 struct gendisk *disk = dev_to_disk(dev); 3099 3100 if (disk->fops == &nvme_bdev_ops) 3101 return nvme_get_ns_from_dev(dev)->head; 3102 else 3103 return disk->private_data; 3104 } 3105 3106 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 3107 char *buf) 3108 { 3109 struct nvme_ns_head *head = dev_to_ns_head(dev); 3110 struct nvme_ns_ids *ids = &head->ids; 3111 struct nvme_subsystem *subsys = head->subsys; 3112 int serial_len = sizeof(subsys->serial); 3113 int model_len = sizeof(subsys->model); 3114 3115 if (!uuid_is_null(&ids->uuid)) 3116 return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid); 3117 3118 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3119 return sysfs_emit(buf, "eui.%16phN\n", ids->nguid); 3120 3121 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3122 return sysfs_emit(buf, "eui.%8phN\n", ids->eui64); 3123 3124 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 3125 subsys->serial[serial_len - 1] == '\0')) 3126 serial_len--; 3127 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 3128 subsys->model[model_len - 1] == '\0')) 3129 model_len--; 3130 3131 return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 3132 serial_len, subsys->serial, model_len, subsys->model, 3133 head->ns_id); 3134 } 3135 static DEVICE_ATTR_RO(wwid); 3136 3137 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 3138 char *buf) 3139 { 3140 return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 3141 } 3142 static DEVICE_ATTR_RO(nguid); 3143 3144 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 3145 char *buf) 3146 { 3147 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3148 3149 /* For backward compatibility expose the NGUID to userspace if 3150 * we have no UUID set 3151 */ 3152 if (uuid_is_null(&ids->uuid)) { 3153 printk_ratelimited(KERN_WARNING 3154 "No UUID available providing old NGUID\n"); 3155 return sysfs_emit(buf, "%pU\n", ids->nguid); 3156 } 3157 return sysfs_emit(buf, "%pU\n", &ids->uuid); 3158 } 3159 static DEVICE_ATTR_RO(uuid); 3160 3161 static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 3162 char *buf) 3163 { 3164 return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 3165 } 3166 static DEVICE_ATTR_RO(eui); 3167 3168 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 3169 char *buf) 3170 { 3171 return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id); 3172 } 3173 static DEVICE_ATTR_RO(nsid); 3174 3175 static struct attribute *nvme_ns_id_attrs[] = { 3176 &dev_attr_wwid.attr, 3177 &dev_attr_uuid.attr, 3178 &dev_attr_nguid.attr, 3179 &dev_attr_eui.attr, 3180 &dev_attr_nsid.attr, 3181 #ifdef CONFIG_NVME_MULTIPATH 3182 &dev_attr_ana_grpid.attr, 3183 &dev_attr_ana_state.attr, 3184 #endif 3185 NULL, 3186 }; 3187 3188 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 3189 struct attribute *a, int n) 3190 { 3191 struct device *dev = container_of(kobj, struct device, kobj); 3192 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3193 3194 if (a == &dev_attr_uuid.attr) { 3195 if (uuid_is_null(&ids->uuid) && 3196 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3197 return 0; 3198 } 3199 if (a == &dev_attr_nguid.attr) { 3200 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3201 return 0; 3202 } 3203 if (a == &dev_attr_eui.attr) { 3204 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3205 return 0; 3206 } 3207 #ifdef CONFIG_NVME_MULTIPATH 3208 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { 3209 if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */ 3210 return 0; 3211 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) 3212 return 0; 3213 } 3214 #endif 3215 return a->mode; 3216 } 3217 3218 static const struct attribute_group nvme_ns_id_attr_group = { 3219 .attrs = nvme_ns_id_attrs, 3220 .is_visible = nvme_ns_id_attrs_are_visible, 3221 }; 3222 3223 const struct attribute_group *nvme_ns_id_attr_groups[] = { 3224 &nvme_ns_id_attr_group, 3225 #ifdef CONFIG_NVM 3226 &nvme_nvm_attr_group, 3227 #endif 3228 NULL, 3229 }; 3230 3231 #define nvme_show_str_function(field) \ 3232 static ssize_t field##_show(struct device *dev, \ 3233 struct device_attribute *attr, char *buf) \ 3234 { \ 3235 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3236 return sysfs_emit(buf, "%.*s\n", \ 3237 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 3238 } \ 3239 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3240 3241 nvme_show_str_function(model); 3242 nvme_show_str_function(serial); 3243 nvme_show_str_function(firmware_rev); 3244 3245 #define nvme_show_int_function(field) \ 3246 static ssize_t field##_show(struct device *dev, \ 3247 struct device_attribute *attr, char *buf) \ 3248 { \ 3249 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3250 return sysfs_emit(buf, "%d\n", ctrl->field); \ 3251 } \ 3252 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3253 3254 nvme_show_int_function(cntlid); 3255 nvme_show_int_function(numa_node); 3256 nvme_show_int_function(queue_count); 3257 nvme_show_int_function(sqsize); 3258 nvme_show_int_function(kato); 3259 3260 static ssize_t nvme_sysfs_delete(struct device *dev, 3261 struct device_attribute *attr, const char *buf, 3262 size_t count) 3263 { 3264 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3265 3266 if (device_remove_file_self(dev, attr)) 3267 nvme_delete_ctrl_sync(ctrl); 3268 return count; 3269 } 3270 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 3271 3272 static ssize_t nvme_sysfs_show_transport(struct device *dev, 3273 struct device_attribute *attr, 3274 char *buf) 3275 { 3276 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3277 3278 return sysfs_emit(buf, "%s\n", ctrl->ops->name); 3279 } 3280 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 3281 3282 static ssize_t nvme_sysfs_show_state(struct device *dev, 3283 struct device_attribute *attr, 3284 char *buf) 3285 { 3286 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3287 static const char *const state_name[] = { 3288 [NVME_CTRL_NEW] = "new", 3289 [NVME_CTRL_LIVE] = "live", 3290 [NVME_CTRL_RESETTING] = "resetting", 3291 [NVME_CTRL_CONNECTING] = "connecting", 3292 [NVME_CTRL_DELETING] = "deleting", 3293 [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)", 3294 [NVME_CTRL_DEAD] = "dead", 3295 }; 3296 3297 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 3298 state_name[ctrl->state]) 3299 return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); 3300 3301 return sysfs_emit(buf, "unknown state\n"); 3302 } 3303 3304 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 3305 3306 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 3307 struct device_attribute *attr, 3308 char *buf) 3309 { 3310 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3311 3312 return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn); 3313 } 3314 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 3315 3316 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, 3317 struct device_attribute *attr, 3318 char *buf) 3319 { 3320 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3321 3322 return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn); 3323 } 3324 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); 3325 3326 static ssize_t nvme_sysfs_show_hostid(struct device *dev, 3327 struct device_attribute *attr, 3328 char *buf) 3329 { 3330 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3331 3332 return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id); 3333 } 3334 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); 3335 3336 static ssize_t nvme_sysfs_show_address(struct device *dev, 3337 struct device_attribute *attr, 3338 char *buf) 3339 { 3340 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3341 3342 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 3343 } 3344 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 3345 3346 static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev, 3347 struct device_attribute *attr, char *buf) 3348 { 3349 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3350 struct nvmf_ctrl_options *opts = ctrl->opts; 3351 3352 if (ctrl->opts->max_reconnects == -1) 3353 return sysfs_emit(buf, "off\n"); 3354 return sysfs_emit(buf, "%d\n", 3355 opts->max_reconnects * opts->reconnect_delay); 3356 } 3357 3358 static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev, 3359 struct device_attribute *attr, const char *buf, size_t count) 3360 { 3361 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3362 struct nvmf_ctrl_options *opts = ctrl->opts; 3363 int ctrl_loss_tmo, err; 3364 3365 err = kstrtoint(buf, 10, &ctrl_loss_tmo); 3366 if (err) 3367 return -EINVAL; 3368 3369 if (ctrl_loss_tmo < 0) 3370 opts->max_reconnects = -1; 3371 else 3372 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, 3373 opts->reconnect_delay); 3374 return count; 3375 } 3376 static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR, 3377 nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store); 3378 3379 static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev, 3380 struct device_attribute *attr, char *buf) 3381 { 3382 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3383 3384 if (ctrl->opts->reconnect_delay == -1) 3385 return sysfs_emit(buf, "off\n"); 3386 return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay); 3387 } 3388 3389 static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev, 3390 struct device_attribute *attr, const char *buf, size_t count) 3391 { 3392 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3393 unsigned int v; 3394 int err; 3395 3396 err = kstrtou32(buf, 10, &v); 3397 if (err) 3398 return err; 3399 3400 ctrl->opts->reconnect_delay = v; 3401 return count; 3402 } 3403 static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, 3404 nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store); 3405 3406 static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev, 3407 struct device_attribute *attr, char *buf) 3408 { 3409 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3410 3411 if (ctrl->opts->fast_io_fail_tmo == -1) 3412 return sysfs_emit(buf, "off\n"); 3413 return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo); 3414 } 3415 3416 static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev, 3417 struct device_attribute *attr, const char *buf, size_t count) 3418 { 3419 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3420 struct nvmf_ctrl_options *opts = ctrl->opts; 3421 int fast_io_fail_tmo, err; 3422 3423 err = kstrtoint(buf, 10, &fast_io_fail_tmo); 3424 if (err) 3425 return -EINVAL; 3426 3427 if (fast_io_fail_tmo < 0) 3428 opts->fast_io_fail_tmo = -1; 3429 else 3430 opts->fast_io_fail_tmo = fast_io_fail_tmo; 3431 return count; 3432 } 3433 static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, 3434 nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); 3435 3436 static struct attribute *nvme_dev_attrs[] = { 3437 &dev_attr_reset_controller.attr, 3438 &dev_attr_rescan_controller.attr, 3439 &dev_attr_model.attr, 3440 &dev_attr_serial.attr, 3441 &dev_attr_firmware_rev.attr, 3442 &dev_attr_cntlid.attr, 3443 &dev_attr_delete_controller.attr, 3444 &dev_attr_transport.attr, 3445 &dev_attr_subsysnqn.attr, 3446 &dev_attr_address.attr, 3447 &dev_attr_state.attr, 3448 &dev_attr_numa_node.attr, 3449 &dev_attr_queue_count.attr, 3450 &dev_attr_sqsize.attr, 3451 &dev_attr_hostnqn.attr, 3452 &dev_attr_hostid.attr, 3453 &dev_attr_ctrl_loss_tmo.attr, 3454 &dev_attr_reconnect_delay.attr, 3455 &dev_attr_fast_io_fail_tmo.attr, 3456 &dev_attr_kato.attr, 3457 NULL 3458 }; 3459 3460 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 3461 struct attribute *a, int n) 3462 { 3463 struct device *dev = container_of(kobj, struct device, kobj); 3464 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3465 3466 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 3467 return 0; 3468 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 3469 return 0; 3470 if (a == &dev_attr_hostnqn.attr && !ctrl->opts) 3471 return 0; 3472 if (a == &dev_attr_hostid.attr && !ctrl->opts) 3473 return 0; 3474 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) 3475 return 0; 3476 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) 3477 return 0; 3478 if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) 3479 return 0; 3480 3481 return a->mode; 3482 } 3483 3484 static const struct attribute_group nvme_dev_attrs_group = { 3485 .attrs = nvme_dev_attrs, 3486 .is_visible = nvme_dev_attrs_are_visible, 3487 }; 3488 3489 static const struct attribute_group *nvme_dev_attr_groups[] = { 3490 &nvme_dev_attrs_group, 3491 NULL, 3492 }; 3493 3494 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, 3495 unsigned nsid) 3496 { 3497 struct nvme_ns_head *h; 3498 3499 lockdep_assert_held(&subsys->lock); 3500 3501 list_for_each_entry(h, &subsys->nsheads, entry) { 3502 if (h->ns_id == nsid && nvme_tryget_ns_head(h)) 3503 return h; 3504 } 3505 3506 return NULL; 3507 } 3508 3509 static int __nvme_check_ids(struct nvme_subsystem *subsys, 3510 struct nvme_ns_head *new) 3511 { 3512 struct nvme_ns_head *h; 3513 3514 lockdep_assert_held(&subsys->lock); 3515 3516 list_for_each_entry(h, &subsys->nsheads, entry) { 3517 if (nvme_ns_ids_valid(&new->ids) && 3518 nvme_ns_ids_equal(&new->ids, &h->ids)) 3519 return -EINVAL; 3520 } 3521 3522 return 0; 3523 } 3524 3525 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) 3526 { 3527 cdev_device_del(cdev, cdev_device); 3528 ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(cdev_device->devt)); 3529 } 3530 3531 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, 3532 const struct file_operations *fops, struct module *owner) 3533 { 3534 int minor, ret; 3535 3536 minor = ida_simple_get(&nvme_ns_chr_minor_ida, 0, 0, GFP_KERNEL); 3537 if (minor < 0) 3538 return minor; 3539 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); 3540 cdev_device->class = nvme_ns_chr_class; 3541 device_initialize(cdev_device); 3542 cdev_init(cdev, fops); 3543 cdev->owner = owner; 3544 ret = cdev_device_add(cdev, cdev_device); 3545 if (ret) { 3546 put_device(cdev_device); 3547 ida_simple_remove(&nvme_ns_chr_minor_ida, minor); 3548 } 3549 return ret; 3550 } 3551 3552 static int nvme_ns_chr_open(struct inode *inode, struct file *file) 3553 { 3554 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3555 } 3556 3557 static int nvme_ns_chr_release(struct inode *inode, struct file *file) 3558 { 3559 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3560 return 0; 3561 } 3562 3563 static const struct file_operations nvme_ns_chr_fops = { 3564 .owner = THIS_MODULE, 3565 .open = nvme_ns_chr_open, 3566 .release = nvme_ns_chr_release, 3567 .unlocked_ioctl = nvme_ns_chr_ioctl, 3568 .compat_ioctl = compat_ptr_ioctl, 3569 }; 3570 3571 static int nvme_add_ns_cdev(struct nvme_ns *ns) 3572 { 3573 int ret; 3574 3575 ns->cdev_device.parent = ns->ctrl->device; 3576 ret = dev_set_name(&ns->cdev_device, "ng%dn%d", 3577 ns->ctrl->instance, ns->head->instance); 3578 if (ret) 3579 return ret; 3580 ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, 3581 ns->ctrl->ops->module); 3582 if (ret) 3583 kfree_const(ns->cdev_device.kobj.name); 3584 return ret; 3585 } 3586 3587 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 3588 unsigned nsid, struct nvme_ns_ids *ids) 3589 { 3590 struct nvme_ns_head *head; 3591 size_t size = sizeof(*head); 3592 int ret = -ENOMEM; 3593 3594 #ifdef CONFIG_NVME_MULTIPATH 3595 size += num_possible_nodes() * sizeof(struct nvme_ns *); 3596 #endif 3597 3598 head = kzalloc(size, GFP_KERNEL); 3599 if (!head) 3600 goto out; 3601 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); 3602 if (ret < 0) 3603 goto out_free_head; 3604 head->instance = ret; 3605 INIT_LIST_HEAD(&head->list); 3606 ret = init_srcu_struct(&head->srcu); 3607 if (ret) 3608 goto out_ida_remove; 3609 head->subsys = ctrl->subsys; 3610 head->ns_id = nsid; 3611 head->ids = *ids; 3612 kref_init(&head->ref); 3613 3614 ret = __nvme_check_ids(ctrl->subsys, head); 3615 if (ret) { 3616 dev_err(ctrl->device, 3617 "duplicate IDs for nsid %d\n", nsid); 3618 goto out_cleanup_srcu; 3619 } 3620 3621 if (head->ids.csi) { 3622 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); 3623 if (ret) 3624 goto out_cleanup_srcu; 3625 } else 3626 head->effects = ctrl->effects; 3627 3628 ret = nvme_mpath_alloc_disk(ctrl, head); 3629 if (ret) 3630 goto out_cleanup_srcu; 3631 3632 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 3633 3634 kref_get(&ctrl->subsys->ref); 3635 3636 return head; 3637 out_cleanup_srcu: 3638 cleanup_srcu_struct(&head->srcu); 3639 out_ida_remove: 3640 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); 3641 out_free_head: 3642 kfree(head); 3643 out: 3644 if (ret > 0) 3645 ret = blk_status_to_errno(nvme_error_status(ret)); 3646 return ERR_PTR(ret); 3647 } 3648 3649 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, 3650 struct nvme_ns_ids *ids, bool is_shared) 3651 { 3652 struct nvme_ctrl *ctrl = ns->ctrl; 3653 struct nvme_ns_head *head = NULL; 3654 int ret = 0; 3655 3656 mutex_lock(&ctrl->subsys->lock); 3657 head = nvme_find_ns_head(ctrl->subsys, nsid); 3658 if (!head) { 3659 head = nvme_alloc_ns_head(ctrl, nsid, ids); 3660 if (IS_ERR(head)) { 3661 ret = PTR_ERR(head); 3662 goto out_unlock; 3663 } 3664 head->shared = is_shared; 3665 } else { 3666 ret = -EINVAL; 3667 if (!is_shared || !head->shared) { 3668 dev_err(ctrl->device, 3669 "Duplicate unshared namespace %d\n", nsid); 3670 goto out_put_ns_head; 3671 } 3672 if (!nvme_ns_ids_equal(&head->ids, ids)) { 3673 dev_err(ctrl->device, 3674 "IDs don't match for shared namespace %d\n", 3675 nsid); 3676 goto out_put_ns_head; 3677 } 3678 } 3679 3680 list_add_tail_rcu(&ns->siblings, &head->list); 3681 ns->head = head; 3682 mutex_unlock(&ctrl->subsys->lock); 3683 return 0; 3684 3685 out_put_ns_head: 3686 nvme_put_ns_head(head); 3687 out_unlock: 3688 mutex_unlock(&ctrl->subsys->lock); 3689 return ret; 3690 } 3691 3692 static int ns_cmp(void *priv, const struct list_head *a, 3693 const struct list_head *b) 3694 { 3695 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); 3696 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); 3697 3698 return nsa->head->ns_id - nsb->head->ns_id; 3699 } 3700 3701 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3702 { 3703 struct nvme_ns *ns, *ret = NULL; 3704 3705 down_read(&ctrl->namespaces_rwsem); 3706 list_for_each_entry(ns, &ctrl->namespaces, list) { 3707 if (ns->head->ns_id == nsid) { 3708 if (!nvme_get_ns(ns)) 3709 continue; 3710 ret = ns; 3711 break; 3712 } 3713 if (ns->head->ns_id > nsid) 3714 break; 3715 } 3716 up_read(&ctrl->namespaces_rwsem); 3717 return ret; 3718 } 3719 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); 3720 3721 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid, 3722 struct nvme_ns_ids *ids) 3723 { 3724 struct nvme_ns *ns; 3725 struct gendisk *disk; 3726 struct nvme_id_ns *id; 3727 int node = ctrl->numa_node; 3728 3729 if (nvme_identify_ns(ctrl, nsid, ids, &id)) 3730 return; 3731 3732 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3733 if (!ns) 3734 goto out_free_id; 3735 3736 ns->queue = blk_mq_init_queue(ctrl->tagset); 3737 if (IS_ERR(ns->queue)) 3738 goto out_free_ns; 3739 3740 if (ctrl->opts && ctrl->opts->data_digest) 3741 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); 3742 3743 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 3744 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA) 3745 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 3746 3747 ns->queue->queuedata = ns; 3748 ns->ctrl = ctrl; 3749 kref_init(&ns->kref); 3750 3751 if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED)) 3752 goto out_free_queue; 3753 3754 disk = alloc_disk_node(0, node); 3755 if (!disk) 3756 goto out_unlink_ns; 3757 3758 disk->fops = &nvme_bdev_ops; 3759 disk->private_data = ns; 3760 disk->queue = ns->queue; 3761 /* 3762 * Without the multipath code enabled, multiple controller per 3763 * subsystems are visible as devices and thus we cannot use the 3764 * subsystem instance. 3765 */ 3766 if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags)) 3767 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, 3768 ns->head->instance); 3769 ns->disk = disk; 3770 3771 if (nvme_update_ns_info(ns, id)) 3772 goto out_put_disk; 3773 3774 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { 3775 if (nvme_nvm_register(ns, disk->disk_name, node)) { 3776 dev_warn(ctrl->device, "LightNVM init failure\n"); 3777 goto out_put_disk; 3778 } 3779 } 3780 3781 down_write(&ctrl->namespaces_rwsem); 3782 list_add_tail(&ns->list, &ctrl->namespaces); 3783 up_write(&ctrl->namespaces_rwsem); 3784 3785 nvme_get_ctrl(ctrl); 3786 3787 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups); 3788 if (!nvme_ns_head_multipath(ns->head)) 3789 nvme_add_ns_cdev(ns); 3790 3791 nvme_mpath_add_disk(ns, id); 3792 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 3793 kfree(id); 3794 3795 return; 3796 out_put_disk: 3797 /* prevent double queue cleanup */ 3798 ns->disk->queue = NULL; 3799 put_disk(ns->disk); 3800 out_unlink_ns: 3801 mutex_lock(&ctrl->subsys->lock); 3802 list_del_rcu(&ns->siblings); 3803 if (list_empty(&ns->head->list)) 3804 list_del_init(&ns->head->entry); 3805 mutex_unlock(&ctrl->subsys->lock); 3806 nvme_put_ns_head(ns->head); 3807 out_free_queue: 3808 blk_cleanup_queue(ns->queue); 3809 out_free_ns: 3810 kfree(ns); 3811 out_free_id: 3812 kfree(id); 3813 } 3814 3815 static void nvme_ns_remove(struct nvme_ns *ns) 3816 { 3817 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 3818 return; 3819 3820 set_capacity(ns->disk, 0); 3821 nvme_fault_inject_fini(&ns->fault_inject); 3822 3823 mutex_lock(&ns->ctrl->subsys->lock); 3824 list_del_rcu(&ns->siblings); 3825 if (list_empty(&ns->head->list)) 3826 list_del_init(&ns->head->entry); 3827 mutex_unlock(&ns->ctrl->subsys->lock); 3828 3829 synchronize_rcu(); /* guarantee not available in head->list */ 3830 nvme_mpath_clear_current_path(ns); 3831 synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ 3832 3833 if (ns->disk->flags & GENHD_FL_UP) { 3834 if (!nvme_ns_head_multipath(ns->head)) 3835 nvme_cdev_del(&ns->cdev, &ns->cdev_device); 3836 del_gendisk(ns->disk); 3837 blk_cleanup_queue(ns->queue); 3838 if (blk_get_integrity(ns->disk)) 3839 blk_integrity_unregister(ns->disk); 3840 } 3841 3842 down_write(&ns->ctrl->namespaces_rwsem); 3843 list_del_init(&ns->list); 3844 up_write(&ns->ctrl->namespaces_rwsem); 3845 3846 nvme_mpath_check_last_path(ns); 3847 nvme_put_ns(ns); 3848 } 3849 3850 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) 3851 { 3852 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); 3853 3854 if (ns) { 3855 nvme_ns_remove(ns); 3856 nvme_put_ns(ns); 3857 } 3858 } 3859 3860 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids) 3861 { 3862 struct nvme_id_ns *id; 3863 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 3864 3865 if (test_bit(NVME_NS_DEAD, &ns->flags)) 3866 goto out; 3867 3868 ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id); 3869 if (ret) 3870 goto out; 3871 3872 ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 3873 if (!nvme_ns_ids_equal(&ns->head->ids, ids)) { 3874 dev_err(ns->ctrl->device, 3875 "identifiers changed for nsid %d\n", ns->head->ns_id); 3876 goto out_free_id; 3877 } 3878 3879 ret = nvme_update_ns_info(ns, id); 3880 3881 out_free_id: 3882 kfree(id); 3883 out: 3884 /* 3885 * Only remove the namespace if we got a fatal error back from the 3886 * device, otherwise ignore the error and just move on. 3887 * 3888 * TODO: we should probably schedule a delayed retry here. 3889 */ 3890 if (ret > 0 && (ret & NVME_SC_DNR)) 3891 nvme_ns_remove(ns); 3892 } 3893 3894 static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3895 { 3896 struct nvme_ns_ids ids = { }; 3897 struct nvme_ns *ns; 3898 3899 if (nvme_identify_ns_descs(ctrl, nsid, &ids)) 3900 return; 3901 3902 ns = nvme_find_get_ns(ctrl, nsid); 3903 if (ns) { 3904 nvme_validate_ns(ns, &ids); 3905 nvme_put_ns(ns); 3906 return; 3907 } 3908 3909 switch (ids.csi) { 3910 case NVME_CSI_NVM: 3911 nvme_alloc_ns(ctrl, nsid, &ids); 3912 break; 3913 case NVME_CSI_ZNS: 3914 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { 3915 dev_warn(ctrl->device, 3916 "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", 3917 nsid); 3918 break; 3919 } 3920 if (!nvme_multi_css(ctrl)) { 3921 dev_warn(ctrl->device, 3922 "command set not reported for nsid: %d\n", 3923 nsid); 3924 break; 3925 } 3926 nvme_alloc_ns(ctrl, nsid, &ids); 3927 break; 3928 default: 3929 dev_warn(ctrl->device, "unknown csi %u for nsid %u\n", 3930 ids.csi, nsid); 3931 break; 3932 } 3933 } 3934 3935 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3936 unsigned nsid) 3937 { 3938 struct nvme_ns *ns, *next; 3939 LIST_HEAD(rm_list); 3940 3941 down_write(&ctrl->namespaces_rwsem); 3942 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3943 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) 3944 list_move_tail(&ns->list, &rm_list); 3945 } 3946 up_write(&ctrl->namespaces_rwsem); 3947 3948 list_for_each_entry_safe(ns, next, &rm_list, list) 3949 nvme_ns_remove(ns); 3950 3951 } 3952 3953 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) 3954 { 3955 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); 3956 __le32 *ns_list; 3957 u32 prev = 0; 3958 int ret = 0, i; 3959 3960 if (nvme_ctrl_limited_cns(ctrl)) 3961 return -EOPNOTSUPP; 3962 3963 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 3964 if (!ns_list) 3965 return -ENOMEM; 3966 3967 for (;;) { 3968 struct nvme_command cmd = { 3969 .identify.opcode = nvme_admin_identify, 3970 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, 3971 .identify.nsid = cpu_to_le32(prev), 3972 }; 3973 3974 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, 3975 NVME_IDENTIFY_DATA_SIZE); 3976 if (ret) { 3977 dev_warn(ctrl->device, 3978 "Identify NS List failed (status=0x%x)\n", ret); 3979 goto free; 3980 } 3981 3982 for (i = 0; i < nr_entries; i++) { 3983 u32 nsid = le32_to_cpu(ns_list[i]); 3984 3985 if (!nsid) /* end of the list? */ 3986 goto out; 3987 nvme_validate_or_alloc_ns(ctrl, nsid); 3988 while (++prev < nsid) 3989 nvme_ns_remove_by_nsid(ctrl, prev); 3990 } 3991 } 3992 out: 3993 nvme_remove_invalid_namespaces(ctrl, prev); 3994 free: 3995 kfree(ns_list); 3996 return ret; 3997 } 3998 3999 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) 4000 { 4001 struct nvme_id_ctrl *id; 4002 u32 nn, i; 4003 4004 if (nvme_identify_ctrl(ctrl, &id)) 4005 return; 4006 nn = le32_to_cpu(id->nn); 4007 kfree(id); 4008 4009 for (i = 1; i <= nn; i++) 4010 nvme_validate_or_alloc_ns(ctrl, i); 4011 4012 nvme_remove_invalid_namespaces(ctrl, nn); 4013 } 4014 4015 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 4016 { 4017 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 4018 __le32 *log; 4019 int error; 4020 4021 log = kzalloc(log_size, GFP_KERNEL); 4022 if (!log) 4023 return; 4024 4025 /* 4026 * We need to read the log to clear the AEN, but we don't want to rely 4027 * on it for the changed namespace information as userspace could have 4028 * raced with us in reading the log page, which could cause us to miss 4029 * updates. 4030 */ 4031 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, 4032 NVME_CSI_NVM, log, log_size, 0); 4033 if (error) 4034 dev_warn(ctrl->device, 4035 "reading changed ns log failed: %d\n", error); 4036 4037 kfree(log); 4038 } 4039 4040 static void nvme_scan_work(struct work_struct *work) 4041 { 4042 struct nvme_ctrl *ctrl = 4043 container_of(work, struct nvme_ctrl, scan_work); 4044 4045 /* No tagset on a live ctrl means IO queues could not created */ 4046 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) 4047 return; 4048 4049 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 4050 dev_info(ctrl->device, "rescanning namespaces.\n"); 4051 nvme_clear_changed_ns_log(ctrl); 4052 } 4053 4054 mutex_lock(&ctrl->scan_lock); 4055 if (nvme_scan_ns_list(ctrl) != 0) 4056 nvme_scan_ns_sequential(ctrl); 4057 mutex_unlock(&ctrl->scan_lock); 4058 4059 down_write(&ctrl->namespaces_rwsem); 4060 list_sort(NULL, &ctrl->namespaces, ns_cmp); 4061 up_write(&ctrl->namespaces_rwsem); 4062 } 4063 4064 /* 4065 * This function iterates the namespace list unlocked to allow recovery from 4066 * controller failure. It is up to the caller to ensure the namespace list is 4067 * not modified by scan work while this function is executing. 4068 */ 4069 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 4070 { 4071 struct nvme_ns *ns, *next; 4072 LIST_HEAD(ns_list); 4073 4074 /* 4075 * make sure to requeue I/O to all namespaces as these 4076 * might result from the scan itself and must complete 4077 * for the scan_work to make progress 4078 */ 4079 nvme_mpath_clear_ctrl_paths(ctrl); 4080 4081 /* prevent racing with ns scanning */ 4082 flush_work(&ctrl->scan_work); 4083 4084 /* 4085 * The dead states indicates the controller was not gracefully 4086 * disconnected. In that case, we won't be able to flush any data while 4087 * removing the namespaces' disks; fail all the queues now to avoid 4088 * potentially having to clean up the failed sync later. 4089 */ 4090 if (ctrl->state == NVME_CTRL_DEAD) 4091 nvme_kill_queues(ctrl); 4092 4093 /* this is a no-op when called from the controller reset handler */ 4094 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); 4095 4096 down_write(&ctrl->namespaces_rwsem); 4097 list_splice_init(&ctrl->namespaces, &ns_list); 4098 up_write(&ctrl->namespaces_rwsem); 4099 4100 list_for_each_entry_safe(ns, next, &ns_list, list) 4101 nvme_ns_remove(ns); 4102 } 4103 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 4104 4105 static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env) 4106 { 4107 struct nvme_ctrl *ctrl = 4108 container_of(dev, struct nvme_ctrl, ctrl_device); 4109 struct nvmf_ctrl_options *opts = ctrl->opts; 4110 int ret; 4111 4112 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); 4113 if (ret) 4114 return ret; 4115 4116 if (opts) { 4117 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); 4118 if (ret) 4119 return ret; 4120 4121 ret = add_uevent_var(env, "NVME_TRSVCID=%s", 4122 opts->trsvcid ?: "none"); 4123 if (ret) 4124 return ret; 4125 4126 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", 4127 opts->host_traddr ?: "none"); 4128 if (ret) 4129 return ret; 4130 4131 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", 4132 opts->host_iface ?: "none"); 4133 } 4134 return ret; 4135 } 4136 4137 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 4138 { 4139 char *envp[2] = { NULL, NULL }; 4140 u32 aen_result = ctrl->aen_result; 4141 4142 ctrl->aen_result = 0; 4143 if (!aen_result) 4144 return; 4145 4146 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 4147 if (!envp[0]) 4148 return; 4149 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4150 kfree(envp[0]); 4151 } 4152 4153 static void nvme_async_event_work(struct work_struct *work) 4154 { 4155 struct nvme_ctrl *ctrl = 4156 container_of(work, struct nvme_ctrl, async_event_work); 4157 4158 nvme_aen_uevent(ctrl); 4159 ctrl->ops->submit_async_event(ctrl); 4160 } 4161 4162 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 4163 { 4164 4165 u32 csts; 4166 4167 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 4168 return false; 4169 4170 if (csts == ~0) 4171 return false; 4172 4173 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 4174 } 4175 4176 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 4177 { 4178 struct nvme_fw_slot_info_log *log; 4179 4180 log = kmalloc(sizeof(*log), GFP_KERNEL); 4181 if (!log) 4182 return; 4183 4184 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, 4185 log, sizeof(*log), 0)) 4186 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 4187 kfree(log); 4188 } 4189 4190 static void nvme_fw_act_work(struct work_struct *work) 4191 { 4192 struct nvme_ctrl *ctrl = container_of(work, 4193 struct nvme_ctrl, fw_act_work); 4194 unsigned long fw_act_timeout; 4195 4196 if (ctrl->mtfa) 4197 fw_act_timeout = jiffies + 4198 msecs_to_jiffies(ctrl->mtfa * 100); 4199 else 4200 fw_act_timeout = jiffies + 4201 msecs_to_jiffies(admin_timeout * 1000); 4202 4203 nvme_stop_queues(ctrl); 4204 while (nvme_ctrl_pp_status(ctrl)) { 4205 if (time_after(jiffies, fw_act_timeout)) { 4206 dev_warn(ctrl->device, 4207 "Fw activation timeout, reset controller\n"); 4208 nvme_try_sched_reset(ctrl); 4209 return; 4210 } 4211 msleep(100); 4212 } 4213 4214 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) 4215 return; 4216 4217 nvme_start_queues(ctrl); 4218 /* read FW slot information to clear the AER */ 4219 nvme_get_fw_slot_info(ctrl); 4220 } 4221 4222 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 4223 { 4224 u32 aer_notice_type = (result & 0xff00) >> 8; 4225 4226 trace_nvme_async_event(ctrl, aer_notice_type); 4227 4228 switch (aer_notice_type) { 4229 case NVME_AER_NOTICE_NS_CHANGED: 4230 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 4231 nvme_queue_scan(ctrl); 4232 break; 4233 case NVME_AER_NOTICE_FW_ACT_STARTING: 4234 /* 4235 * We are (ab)using the RESETTING state to prevent subsequent 4236 * recovery actions from interfering with the controller's 4237 * firmware activation. 4238 */ 4239 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 4240 queue_work(nvme_wq, &ctrl->fw_act_work); 4241 break; 4242 #ifdef CONFIG_NVME_MULTIPATH 4243 case NVME_AER_NOTICE_ANA: 4244 if (!ctrl->ana_log_buf) 4245 break; 4246 queue_work(nvme_wq, &ctrl->ana_work); 4247 break; 4248 #endif 4249 case NVME_AER_NOTICE_DISC_CHANGED: 4250 ctrl->aen_result = result; 4251 break; 4252 default: 4253 dev_warn(ctrl->device, "async event result %08x\n", result); 4254 } 4255 } 4256 4257 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 4258 volatile union nvme_result *res) 4259 { 4260 u32 result = le32_to_cpu(res->u32); 4261 u32 aer_type = result & 0x07; 4262 4263 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 4264 return; 4265 4266 switch (aer_type) { 4267 case NVME_AER_NOTICE: 4268 nvme_handle_aen_notice(ctrl, result); 4269 break; 4270 case NVME_AER_ERROR: 4271 case NVME_AER_SMART: 4272 case NVME_AER_CSS: 4273 case NVME_AER_VS: 4274 trace_nvme_async_event(ctrl, aer_type); 4275 ctrl->aen_result = result; 4276 break; 4277 default: 4278 break; 4279 } 4280 queue_work(nvme_wq, &ctrl->async_event_work); 4281 } 4282 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 4283 4284 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 4285 { 4286 nvme_mpath_stop(ctrl); 4287 nvme_stop_keep_alive(ctrl); 4288 nvme_stop_failfast_work(ctrl); 4289 flush_work(&ctrl->async_event_work); 4290 cancel_work_sync(&ctrl->fw_act_work); 4291 } 4292 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 4293 4294 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 4295 { 4296 nvme_start_keep_alive(ctrl); 4297 4298 nvme_enable_aen(ctrl); 4299 4300 if (ctrl->queue_count > 1) { 4301 nvme_queue_scan(ctrl); 4302 nvme_start_queues(ctrl); 4303 } 4304 } 4305 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 4306 4307 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4308 { 4309 nvme_hwmon_exit(ctrl); 4310 nvme_fault_inject_fini(&ctrl->fault_inject); 4311 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4312 cdev_device_del(&ctrl->cdev, ctrl->device); 4313 nvme_put_ctrl(ctrl); 4314 } 4315 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 4316 4317 static void nvme_free_cels(struct nvme_ctrl *ctrl) 4318 { 4319 struct nvme_effects_log *cel; 4320 unsigned long i; 4321 4322 xa_for_each(&ctrl->cels, i, cel) { 4323 xa_erase(&ctrl->cels, i); 4324 kfree(cel); 4325 } 4326 4327 xa_destroy(&ctrl->cels); 4328 } 4329 4330 static void nvme_free_ctrl(struct device *dev) 4331 { 4332 struct nvme_ctrl *ctrl = 4333 container_of(dev, struct nvme_ctrl, ctrl_device); 4334 struct nvme_subsystem *subsys = ctrl->subsys; 4335 4336 if (!subsys || ctrl->instance != subsys->instance) 4337 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 4338 4339 nvme_free_cels(ctrl); 4340 nvme_mpath_uninit(ctrl); 4341 __free_page(ctrl->discard_page); 4342 4343 if (subsys) { 4344 mutex_lock(&nvme_subsystems_lock); 4345 list_del(&ctrl->subsys_entry); 4346 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 4347 mutex_unlock(&nvme_subsystems_lock); 4348 } 4349 4350 ctrl->ops->free_ctrl(ctrl); 4351 4352 if (subsys) 4353 nvme_put_subsystem(subsys); 4354 } 4355 4356 /* 4357 * Initialize a NVMe controller structures. This needs to be called during 4358 * earliest initialization so that we have the initialized structured around 4359 * during probing. 4360 */ 4361 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 4362 const struct nvme_ctrl_ops *ops, unsigned long quirks) 4363 { 4364 int ret; 4365 4366 ctrl->state = NVME_CTRL_NEW; 4367 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 4368 spin_lock_init(&ctrl->lock); 4369 mutex_init(&ctrl->scan_lock); 4370 INIT_LIST_HEAD(&ctrl->namespaces); 4371 xa_init(&ctrl->cels); 4372 init_rwsem(&ctrl->namespaces_rwsem); 4373 ctrl->dev = dev; 4374 ctrl->ops = ops; 4375 ctrl->quirks = quirks; 4376 ctrl->numa_node = NUMA_NO_NODE; 4377 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 4378 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 4379 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 4380 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 4381 init_waitqueue_head(&ctrl->state_wq); 4382 4383 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 4384 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); 4385 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 4386 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 4387 4388 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > 4389 PAGE_SIZE); 4390 ctrl->discard_page = alloc_page(GFP_KERNEL); 4391 if (!ctrl->discard_page) { 4392 ret = -ENOMEM; 4393 goto out; 4394 } 4395 4396 ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); 4397 if (ret < 0) 4398 goto out; 4399 ctrl->instance = ret; 4400 4401 device_initialize(&ctrl->ctrl_device); 4402 ctrl->device = &ctrl->ctrl_device; 4403 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), 4404 ctrl->instance); 4405 ctrl->device->class = nvme_class; 4406 ctrl->device->parent = ctrl->dev; 4407 ctrl->device->groups = nvme_dev_attr_groups; 4408 ctrl->device->release = nvme_free_ctrl; 4409 dev_set_drvdata(ctrl->device, ctrl); 4410 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 4411 if (ret) 4412 goto out_release_instance; 4413 4414 nvme_get_ctrl(ctrl); 4415 cdev_init(&ctrl->cdev, &nvme_dev_fops); 4416 ctrl->cdev.owner = ops->module; 4417 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 4418 if (ret) 4419 goto out_free_name; 4420 4421 /* 4422 * Initialize latency tolerance controls. The sysfs files won't 4423 * be visible to userspace unless the device actually supports APST. 4424 */ 4425 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 4426 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 4427 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 4428 4429 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); 4430 nvme_mpath_init_ctrl(ctrl); 4431 4432 return 0; 4433 out_free_name: 4434 nvme_put_ctrl(ctrl); 4435 kfree_const(ctrl->device->kobj.name); 4436 out_release_instance: 4437 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 4438 out: 4439 if (ctrl->discard_page) 4440 __free_page(ctrl->discard_page); 4441 return ret; 4442 } 4443 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 4444 4445 /** 4446 * nvme_kill_queues(): Ends all namespace queues 4447 * @ctrl: the dead controller that needs to end 4448 * 4449 * Call this function when the driver determines it is unable to get the 4450 * controller in a state capable of servicing IO. 4451 */ 4452 void nvme_kill_queues(struct nvme_ctrl *ctrl) 4453 { 4454 struct nvme_ns *ns; 4455 4456 down_read(&ctrl->namespaces_rwsem); 4457 4458 /* Forcibly unquiesce queues to avoid blocking dispatch */ 4459 if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q)) 4460 blk_mq_unquiesce_queue(ctrl->admin_q); 4461 4462 list_for_each_entry(ns, &ctrl->namespaces, list) 4463 nvme_set_queue_dying(ns); 4464 4465 up_read(&ctrl->namespaces_rwsem); 4466 } 4467 EXPORT_SYMBOL_GPL(nvme_kill_queues); 4468 4469 void nvme_unfreeze(struct nvme_ctrl *ctrl) 4470 { 4471 struct nvme_ns *ns; 4472 4473 down_read(&ctrl->namespaces_rwsem); 4474 list_for_each_entry(ns, &ctrl->namespaces, list) 4475 blk_mq_unfreeze_queue(ns->queue); 4476 up_read(&ctrl->namespaces_rwsem); 4477 } 4478 EXPORT_SYMBOL_GPL(nvme_unfreeze); 4479 4480 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 4481 { 4482 struct nvme_ns *ns; 4483 4484 down_read(&ctrl->namespaces_rwsem); 4485 list_for_each_entry(ns, &ctrl->namespaces, list) { 4486 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 4487 if (timeout <= 0) 4488 break; 4489 } 4490 up_read(&ctrl->namespaces_rwsem); 4491 return timeout; 4492 } 4493 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 4494 4495 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 4496 { 4497 struct nvme_ns *ns; 4498 4499 down_read(&ctrl->namespaces_rwsem); 4500 list_for_each_entry(ns, &ctrl->namespaces, list) 4501 blk_mq_freeze_queue_wait(ns->queue); 4502 up_read(&ctrl->namespaces_rwsem); 4503 } 4504 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 4505 4506 void nvme_start_freeze(struct nvme_ctrl *ctrl) 4507 { 4508 struct nvme_ns *ns; 4509 4510 down_read(&ctrl->namespaces_rwsem); 4511 list_for_each_entry(ns, &ctrl->namespaces, list) 4512 blk_freeze_queue_start(ns->queue); 4513 up_read(&ctrl->namespaces_rwsem); 4514 } 4515 EXPORT_SYMBOL_GPL(nvme_start_freeze); 4516 4517 void nvme_stop_queues(struct nvme_ctrl *ctrl) 4518 { 4519 struct nvme_ns *ns; 4520 4521 down_read(&ctrl->namespaces_rwsem); 4522 list_for_each_entry(ns, &ctrl->namespaces, list) 4523 blk_mq_quiesce_queue(ns->queue); 4524 up_read(&ctrl->namespaces_rwsem); 4525 } 4526 EXPORT_SYMBOL_GPL(nvme_stop_queues); 4527 4528 void nvme_start_queues(struct nvme_ctrl *ctrl) 4529 { 4530 struct nvme_ns *ns; 4531 4532 down_read(&ctrl->namespaces_rwsem); 4533 list_for_each_entry(ns, &ctrl->namespaces, list) 4534 blk_mq_unquiesce_queue(ns->queue); 4535 up_read(&ctrl->namespaces_rwsem); 4536 } 4537 EXPORT_SYMBOL_GPL(nvme_start_queues); 4538 4539 void nvme_sync_io_queues(struct nvme_ctrl *ctrl) 4540 { 4541 struct nvme_ns *ns; 4542 4543 down_read(&ctrl->namespaces_rwsem); 4544 list_for_each_entry(ns, &ctrl->namespaces, list) 4545 blk_sync_queue(ns->queue); 4546 up_read(&ctrl->namespaces_rwsem); 4547 } 4548 EXPORT_SYMBOL_GPL(nvme_sync_io_queues); 4549 4550 void nvme_sync_queues(struct nvme_ctrl *ctrl) 4551 { 4552 nvme_sync_io_queues(ctrl); 4553 if (ctrl->admin_q) 4554 blk_sync_queue(ctrl->admin_q); 4555 } 4556 EXPORT_SYMBOL_GPL(nvme_sync_queues); 4557 4558 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) 4559 { 4560 if (file->f_op != &nvme_dev_fops) 4561 return NULL; 4562 return file->private_data; 4563 } 4564 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); 4565 4566 /* 4567 * Check we didn't inadvertently grow the command structure sizes: 4568 */ 4569 static inline void _nvme_check_size(void) 4570 { 4571 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); 4572 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 4573 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); 4574 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 4575 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); 4576 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 4577 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); 4578 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); 4579 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 4580 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); 4581 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 4582 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 4583 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 4584 BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); 4585 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); 4586 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); 4587 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 4588 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 4589 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 4590 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); 4591 } 4592 4593 4594 static int __init nvme_core_init(void) 4595 { 4596 int result = -ENOMEM; 4597 4598 _nvme_check_size(); 4599 4600 nvme_wq = alloc_workqueue("nvme-wq", 4601 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4602 if (!nvme_wq) 4603 goto out; 4604 4605 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", 4606 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4607 if (!nvme_reset_wq) 4608 goto destroy_wq; 4609 4610 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", 4611 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4612 if (!nvme_delete_wq) 4613 goto destroy_reset_wq; 4614 4615 result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0, 4616 NVME_MINORS, "nvme"); 4617 if (result < 0) 4618 goto destroy_delete_wq; 4619 4620 nvme_class = class_create(THIS_MODULE, "nvme"); 4621 if (IS_ERR(nvme_class)) { 4622 result = PTR_ERR(nvme_class); 4623 goto unregister_chrdev; 4624 } 4625 nvme_class->dev_uevent = nvme_class_uevent; 4626 4627 nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem"); 4628 if (IS_ERR(nvme_subsys_class)) { 4629 result = PTR_ERR(nvme_subsys_class); 4630 goto destroy_class; 4631 } 4632 4633 result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, 4634 "nvme-generic"); 4635 if (result < 0) 4636 goto destroy_subsys_class; 4637 4638 nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic"); 4639 if (IS_ERR(nvme_ns_chr_class)) { 4640 result = PTR_ERR(nvme_ns_chr_class); 4641 goto unregister_generic_ns; 4642 } 4643 4644 return 0; 4645 4646 unregister_generic_ns: 4647 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 4648 destroy_subsys_class: 4649 class_destroy(nvme_subsys_class); 4650 destroy_class: 4651 class_destroy(nvme_class); 4652 unregister_chrdev: 4653 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 4654 destroy_delete_wq: 4655 destroy_workqueue(nvme_delete_wq); 4656 destroy_reset_wq: 4657 destroy_workqueue(nvme_reset_wq); 4658 destroy_wq: 4659 destroy_workqueue(nvme_wq); 4660 out: 4661 return result; 4662 } 4663 4664 static void __exit nvme_core_exit(void) 4665 { 4666 class_destroy(nvme_ns_chr_class); 4667 class_destroy(nvme_subsys_class); 4668 class_destroy(nvme_class); 4669 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 4670 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 4671 destroy_workqueue(nvme_delete_wq); 4672 destroy_workqueue(nvme_reset_wq); 4673 destroy_workqueue(nvme_wq); 4674 ida_destroy(&nvme_ns_chr_minor_ida); 4675 ida_destroy(&nvme_instance_ida); 4676 } 4677 4678 MODULE_LICENSE("GPL"); 4679 MODULE_VERSION("1.0"); 4680 module_init(nvme_core_init); 4681 module_exit(nvme_core_exit); 4682