1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVM Express device driver 4 * Copyright (c) 2011-2014, Intel Corporation. 5 */ 6 7 #include <linux/blkdev.h> 8 #include <linux/blk-mq.h> 9 #include <linux/delay.h> 10 #include <linux/errno.h> 11 #include <linux/hdreg.h> 12 #include <linux/kernel.h> 13 #include <linux/module.h> 14 #include <linux/backing-dev.h> 15 #include <linux/list_sort.h> 16 #include <linux/slab.h> 17 #include <linux/types.h> 18 #include <linux/pr.h> 19 #include <linux/ptrace.h> 20 #include <linux/nvme_ioctl.h> 21 #include <linux/t10-pi.h> 22 #include <linux/pm_qos.h> 23 #include <asm/unaligned.h> 24 25 #include "nvme.h" 26 #include "fabrics.h" 27 28 #define CREATE_TRACE_POINTS 29 #include "trace.h" 30 31 #define NVME_MINORS (1U << MINORBITS) 32 33 unsigned int admin_timeout = 60; 34 module_param(admin_timeout, uint, 0644); 35 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands"); 36 EXPORT_SYMBOL_GPL(admin_timeout); 37 38 unsigned int nvme_io_timeout = 30; 39 module_param_named(io_timeout, nvme_io_timeout, uint, 0644); 40 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); 41 EXPORT_SYMBOL_GPL(nvme_io_timeout); 42 43 static unsigned char shutdown_timeout = 5; 44 module_param(shutdown_timeout, byte, 0644); 45 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); 46 47 static u8 nvme_max_retries = 5; 48 module_param_named(max_retries, nvme_max_retries, byte, 0644); 49 MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); 50 51 static unsigned long default_ps_max_latency_us = 100000; 52 module_param(default_ps_max_latency_us, ulong, 0644); 53 MODULE_PARM_DESC(default_ps_max_latency_us, 54 "max power saving latency for new devices; use PM QOS to change per device"); 55 56 static bool force_apst; 57 module_param(force_apst, bool, 0644); 58 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); 59 60 static bool streams; 61 module_param(streams, bool, 0644); 62 MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); 63 64 /* 65 * nvme_wq - hosts nvme related works that are not reset or delete 66 * nvme_reset_wq - hosts nvme reset works 67 * nvme_delete_wq - hosts nvme delete works 68 * 69 * nvme_wq will host works such are scan, aen handling, fw activation, 70 * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq 71 * runs reset works which also flush works hosted on nvme_wq for 72 * serialization purposes. nvme_delete_wq host controller deletion 73 * works which flush reset works for serialization. 74 */ 75 struct workqueue_struct *nvme_wq; 76 EXPORT_SYMBOL_GPL(nvme_wq); 77 78 struct workqueue_struct *nvme_reset_wq; 79 EXPORT_SYMBOL_GPL(nvme_reset_wq); 80 81 struct workqueue_struct *nvme_delete_wq; 82 EXPORT_SYMBOL_GPL(nvme_delete_wq); 83 84 static LIST_HEAD(nvme_subsystems); 85 static DEFINE_MUTEX(nvme_subsystems_lock); 86 87 static DEFINE_IDA(nvme_instance_ida); 88 static dev_t nvme_chr_devt; 89 static struct class *nvme_class; 90 static struct class *nvme_subsys_class; 91 92 static int nvme_revalidate_disk(struct gendisk *disk); 93 static void nvme_put_subsystem(struct nvme_subsystem *subsys); 94 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 95 unsigned nsid); 96 97 static void nvme_set_queue_dying(struct nvme_ns *ns) 98 { 99 /* 100 * Revalidating a dead namespace sets capacity to 0. This will end 101 * buffered writers dirtying pages that can't be synced. 102 */ 103 if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) 104 return; 105 blk_set_queue_dying(ns->queue); 106 /* Forcibly unquiesce queues to avoid blocking dispatch */ 107 blk_mq_unquiesce_queue(ns->queue); 108 /* 109 * Revalidate after unblocking dispatchers that may be holding bd_butex 110 */ 111 revalidate_disk(ns->disk); 112 } 113 114 static void nvme_queue_scan(struct nvme_ctrl *ctrl) 115 { 116 /* 117 * Only new queue scan work when admin and IO queues are both alive 118 */ 119 if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset) 120 queue_work(nvme_wq, &ctrl->scan_work); 121 } 122 123 /* 124 * Use this function to proceed with scheduling reset_work for a controller 125 * that had previously been set to the resetting state. This is intended for 126 * code paths that can't be interrupted by other reset attempts. A hot removal 127 * may prevent this from succeeding. 128 */ 129 int nvme_try_sched_reset(struct nvme_ctrl *ctrl) 130 { 131 if (ctrl->state != NVME_CTRL_RESETTING) 132 return -EBUSY; 133 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) 134 return -EBUSY; 135 return 0; 136 } 137 EXPORT_SYMBOL_GPL(nvme_try_sched_reset); 138 139 int nvme_reset_ctrl(struct nvme_ctrl *ctrl) 140 { 141 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 142 return -EBUSY; 143 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) 144 return -EBUSY; 145 return 0; 146 } 147 EXPORT_SYMBOL_GPL(nvme_reset_ctrl); 148 149 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) 150 { 151 int ret; 152 153 ret = nvme_reset_ctrl(ctrl); 154 if (!ret) { 155 flush_work(&ctrl->reset_work); 156 if (ctrl->state != NVME_CTRL_LIVE) 157 ret = -ENETRESET; 158 } 159 160 return ret; 161 } 162 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); 163 164 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) 165 { 166 dev_info(ctrl->device, 167 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn); 168 169 flush_work(&ctrl->reset_work); 170 nvme_stop_ctrl(ctrl); 171 nvme_remove_namespaces(ctrl); 172 ctrl->ops->delete_ctrl(ctrl); 173 nvme_uninit_ctrl(ctrl); 174 nvme_put_ctrl(ctrl); 175 } 176 177 static void nvme_delete_ctrl_work(struct work_struct *work) 178 { 179 struct nvme_ctrl *ctrl = 180 container_of(work, struct nvme_ctrl, delete_work); 181 182 nvme_do_delete_ctrl(ctrl); 183 } 184 185 int nvme_delete_ctrl(struct nvme_ctrl *ctrl) 186 { 187 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 188 return -EBUSY; 189 if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) 190 return -EBUSY; 191 return 0; 192 } 193 EXPORT_SYMBOL_GPL(nvme_delete_ctrl); 194 195 static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) 196 { 197 int ret = 0; 198 199 /* 200 * Keep a reference until nvme_do_delete_ctrl() complete, 201 * since ->delete_ctrl can free the controller. 202 */ 203 nvme_get_ctrl(ctrl); 204 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 205 ret = -EBUSY; 206 if (!ret) 207 nvme_do_delete_ctrl(ctrl); 208 nvme_put_ctrl(ctrl); 209 return ret; 210 } 211 212 static inline bool nvme_ns_has_pi(struct nvme_ns *ns) 213 { 214 return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple); 215 } 216 217 static blk_status_t nvme_error_status(u16 status) 218 { 219 switch (status & 0x7ff) { 220 case NVME_SC_SUCCESS: 221 return BLK_STS_OK; 222 case NVME_SC_CAP_EXCEEDED: 223 return BLK_STS_NOSPC; 224 case NVME_SC_LBA_RANGE: 225 return BLK_STS_TARGET; 226 case NVME_SC_BAD_ATTRIBUTES: 227 case NVME_SC_ONCS_NOT_SUPPORTED: 228 case NVME_SC_INVALID_OPCODE: 229 case NVME_SC_INVALID_FIELD: 230 case NVME_SC_INVALID_NS: 231 return BLK_STS_NOTSUPP; 232 case NVME_SC_WRITE_FAULT: 233 case NVME_SC_READ_ERROR: 234 case NVME_SC_UNWRITTEN_BLOCK: 235 case NVME_SC_ACCESS_DENIED: 236 case NVME_SC_READ_ONLY: 237 case NVME_SC_COMPARE_FAILED: 238 return BLK_STS_MEDIUM; 239 case NVME_SC_GUARD_CHECK: 240 case NVME_SC_APPTAG_CHECK: 241 case NVME_SC_REFTAG_CHECK: 242 case NVME_SC_INVALID_PI: 243 return BLK_STS_PROTECTION; 244 case NVME_SC_RESERVATION_CONFLICT: 245 return BLK_STS_NEXUS; 246 case NVME_SC_HOST_PATH_ERROR: 247 return BLK_STS_TRANSPORT; 248 default: 249 return BLK_STS_IOERR; 250 } 251 } 252 253 static inline bool nvme_req_needs_retry(struct request *req) 254 { 255 if (blk_noretry_request(req)) 256 return false; 257 if (nvme_req(req)->status & NVME_SC_DNR) 258 return false; 259 if (nvme_req(req)->retries >= nvme_max_retries) 260 return false; 261 return true; 262 } 263 264 static void nvme_retry_req(struct request *req) 265 { 266 struct nvme_ns *ns = req->q->queuedata; 267 unsigned long delay = 0; 268 u16 crd; 269 270 /* The mask and shift result must be <= 3 */ 271 crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; 272 if (ns && crd) 273 delay = ns->ctrl->crdt[crd - 1] * 100; 274 275 nvme_req(req)->retries++; 276 blk_mq_requeue_request(req, false); 277 blk_mq_delay_kick_requeue_list(req->q, delay); 278 } 279 280 void nvme_complete_rq(struct request *req) 281 { 282 blk_status_t status = nvme_error_status(nvme_req(req)->status); 283 284 trace_nvme_complete_rq(req); 285 286 nvme_cleanup_cmd(req); 287 288 if (nvme_req(req)->ctrl->kas) 289 nvme_req(req)->ctrl->comp_seen = true; 290 291 if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { 292 if ((req->cmd_flags & REQ_NVME_MPATH) && 293 blk_path_error(status)) { 294 nvme_failover_req(req); 295 return; 296 } 297 298 if (!blk_queue_dying(req->q)) { 299 nvme_retry_req(req); 300 return; 301 } 302 } 303 304 nvme_trace_bio_complete(req, status); 305 blk_mq_end_request(req, status); 306 } 307 EXPORT_SYMBOL_GPL(nvme_complete_rq); 308 309 bool nvme_cancel_request(struct request *req, void *data, bool reserved) 310 { 311 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, 312 "Cancelling I/O %d", req->tag); 313 314 /* don't abort one completed request */ 315 if (blk_mq_request_completed(req)) 316 return true; 317 318 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; 319 blk_mq_complete_request(req); 320 return true; 321 } 322 EXPORT_SYMBOL_GPL(nvme_cancel_request); 323 324 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 325 enum nvme_ctrl_state new_state) 326 { 327 enum nvme_ctrl_state old_state; 328 unsigned long flags; 329 bool changed = false; 330 331 spin_lock_irqsave(&ctrl->lock, flags); 332 333 old_state = ctrl->state; 334 switch (new_state) { 335 case NVME_CTRL_LIVE: 336 switch (old_state) { 337 case NVME_CTRL_NEW: 338 case NVME_CTRL_RESETTING: 339 case NVME_CTRL_CONNECTING: 340 changed = true; 341 /* FALLTHRU */ 342 default: 343 break; 344 } 345 break; 346 case NVME_CTRL_RESETTING: 347 switch (old_state) { 348 case NVME_CTRL_NEW: 349 case NVME_CTRL_LIVE: 350 changed = true; 351 /* FALLTHRU */ 352 default: 353 break; 354 } 355 break; 356 case NVME_CTRL_CONNECTING: 357 switch (old_state) { 358 case NVME_CTRL_NEW: 359 case NVME_CTRL_RESETTING: 360 changed = true; 361 /* FALLTHRU */ 362 default: 363 break; 364 } 365 break; 366 case NVME_CTRL_DELETING: 367 switch (old_state) { 368 case NVME_CTRL_LIVE: 369 case NVME_CTRL_RESETTING: 370 case NVME_CTRL_CONNECTING: 371 changed = true; 372 /* FALLTHRU */ 373 default: 374 break; 375 } 376 break; 377 case NVME_CTRL_DEAD: 378 switch (old_state) { 379 case NVME_CTRL_DELETING: 380 changed = true; 381 /* FALLTHRU */ 382 default: 383 break; 384 } 385 break; 386 default: 387 break; 388 } 389 390 if (changed) { 391 ctrl->state = new_state; 392 wake_up_all(&ctrl->state_wq); 393 } 394 395 spin_unlock_irqrestore(&ctrl->lock, flags); 396 if (changed && ctrl->state == NVME_CTRL_LIVE) 397 nvme_kick_requeue_lists(ctrl); 398 return changed; 399 } 400 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); 401 402 /* 403 * Returns true for sink states that can't ever transition back to live. 404 */ 405 static bool nvme_state_terminal(struct nvme_ctrl *ctrl) 406 { 407 switch (ctrl->state) { 408 case NVME_CTRL_NEW: 409 case NVME_CTRL_LIVE: 410 case NVME_CTRL_RESETTING: 411 case NVME_CTRL_CONNECTING: 412 return false; 413 case NVME_CTRL_DELETING: 414 case NVME_CTRL_DEAD: 415 return true; 416 default: 417 WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state); 418 return true; 419 } 420 } 421 422 /* 423 * Waits for the controller state to be resetting, or returns false if it is 424 * not possible to ever transition to that state. 425 */ 426 bool nvme_wait_reset(struct nvme_ctrl *ctrl) 427 { 428 wait_event(ctrl->state_wq, 429 nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || 430 nvme_state_terminal(ctrl)); 431 return ctrl->state == NVME_CTRL_RESETTING; 432 } 433 EXPORT_SYMBOL_GPL(nvme_wait_reset); 434 435 static void nvme_free_ns_head(struct kref *ref) 436 { 437 struct nvme_ns_head *head = 438 container_of(ref, struct nvme_ns_head, ref); 439 440 nvme_mpath_remove_disk(head); 441 ida_simple_remove(&head->subsys->ns_ida, head->instance); 442 list_del_init(&head->entry); 443 cleanup_srcu_struct(&head->srcu); 444 nvme_put_subsystem(head->subsys); 445 kfree(head); 446 } 447 448 static void nvme_put_ns_head(struct nvme_ns_head *head) 449 { 450 kref_put(&head->ref, nvme_free_ns_head); 451 } 452 453 static void nvme_free_ns(struct kref *kref) 454 { 455 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); 456 457 if (ns->ndev) 458 nvme_nvm_unregister(ns); 459 460 put_disk(ns->disk); 461 nvme_put_ns_head(ns->head); 462 nvme_put_ctrl(ns->ctrl); 463 kfree(ns); 464 } 465 466 static void nvme_put_ns(struct nvme_ns *ns) 467 { 468 kref_put(&ns->kref, nvme_free_ns); 469 } 470 471 static inline void nvme_clear_nvme_request(struct request *req) 472 { 473 if (!(req->rq_flags & RQF_DONTPREP)) { 474 nvme_req(req)->retries = 0; 475 nvme_req(req)->flags = 0; 476 req->rq_flags |= RQF_DONTPREP; 477 } 478 } 479 480 struct request *nvme_alloc_request(struct request_queue *q, 481 struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid) 482 { 483 unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; 484 struct request *req; 485 486 if (qid == NVME_QID_ANY) { 487 req = blk_mq_alloc_request(q, op, flags); 488 } else { 489 req = blk_mq_alloc_request_hctx(q, op, flags, 490 qid ? qid - 1 : 0); 491 } 492 if (IS_ERR(req)) 493 return req; 494 495 req->cmd_flags |= REQ_FAILFAST_DRIVER; 496 nvme_clear_nvme_request(req); 497 nvme_req(req)->cmd = cmd; 498 499 return req; 500 } 501 EXPORT_SYMBOL_GPL(nvme_alloc_request); 502 503 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) 504 { 505 struct nvme_command c; 506 507 memset(&c, 0, sizeof(c)); 508 509 c.directive.opcode = nvme_admin_directive_send; 510 c.directive.nsid = cpu_to_le32(NVME_NSID_ALL); 511 c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE; 512 c.directive.dtype = NVME_DIR_IDENTIFY; 513 c.directive.tdtype = NVME_DIR_STREAMS; 514 c.directive.endir = enable ? NVME_DIR_ENDIR : 0; 515 516 return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0); 517 } 518 519 static int nvme_disable_streams(struct nvme_ctrl *ctrl) 520 { 521 return nvme_toggle_streams(ctrl, false); 522 } 523 524 static int nvme_enable_streams(struct nvme_ctrl *ctrl) 525 { 526 return nvme_toggle_streams(ctrl, true); 527 } 528 529 static int nvme_get_stream_params(struct nvme_ctrl *ctrl, 530 struct streams_directive_params *s, u32 nsid) 531 { 532 struct nvme_command c; 533 534 memset(&c, 0, sizeof(c)); 535 memset(s, 0, sizeof(*s)); 536 537 c.directive.opcode = nvme_admin_directive_recv; 538 c.directive.nsid = cpu_to_le32(nsid); 539 c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); 540 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; 541 c.directive.dtype = NVME_DIR_STREAMS; 542 543 return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s)); 544 } 545 546 static int nvme_configure_directives(struct nvme_ctrl *ctrl) 547 { 548 struct streams_directive_params s; 549 int ret; 550 551 if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES)) 552 return 0; 553 if (!streams) 554 return 0; 555 556 ret = nvme_enable_streams(ctrl); 557 if (ret) 558 return ret; 559 560 ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL); 561 if (ret) 562 return ret; 563 564 ctrl->nssa = le16_to_cpu(s.nssa); 565 if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) { 566 dev_info(ctrl->device, "too few streams (%u) available\n", 567 ctrl->nssa); 568 nvme_disable_streams(ctrl); 569 return 0; 570 } 571 572 ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1); 573 dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams); 574 return 0; 575 } 576 577 /* 578 * Check if 'req' has a write hint associated with it. If it does, assign 579 * a valid namespace stream to the write. 580 */ 581 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl, 582 struct request *req, u16 *control, 583 u32 *dsmgmt) 584 { 585 enum rw_hint streamid = req->write_hint; 586 587 if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE) 588 streamid = 0; 589 else { 590 streamid--; 591 if (WARN_ON_ONCE(streamid > ctrl->nr_streams)) 592 return; 593 594 *control |= NVME_RW_DTYPE_STREAMS; 595 *dsmgmt |= streamid << 16; 596 } 597 598 if (streamid < ARRAY_SIZE(req->q->write_hints)) 599 req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9; 600 } 601 602 static inline void nvme_setup_flush(struct nvme_ns *ns, 603 struct nvme_command *cmnd) 604 { 605 cmnd->common.opcode = nvme_cmd_flush; 606 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); 607 } 608 609 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, 610 struct nvme_command *cmnd) 611 { 612 unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; 613 struct nvme_dsm_range *range; 614 struct bio *bio; 615 616 /* 617 * Some devices do not consider the DSM 'Number of Ranges' field when 618 * determining how much data to DMA. Always allocate memory for maximum 619 * number of segments to prevent device reading beyond end of buffer. 620 */ 621 static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; 622 623 range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN); 624 if (!range) { 625 /* 626 * If we fail allocation our range, fallback to the controller 627 * discard page. If that's also busy, it's safe to return 628 * busy, as we know we can make progress once that's freed. 629 */ 630 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) 631 return BLK_STS_RESOURCE; 632 633 range = page_address(ns->ctrl->discard_page); 634 } 635 636 __rq_for_each_bio(bio, req) { 637 u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); 638 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; 639 640 if (n < segments) { 641 range[n].cattr = cpu_to_le32(0); 642 range[n].nlb = cpu_to_le32(nlb); 643 range[n].slba = cpu_to_le64(slba); 644 } 645 n++; 646 } 647 648 if (WARN_ON_ONCE(n != segments)) { 649 if (virt_to_page(range) == ns->ctrl->discard_page) 650 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); 651 else 652 kfree(range); 653 return BLK_STS_IOERR; 654 } 655 656 cmnd->dsm.opcode = nvme_cmd_dsm; 657 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); 658 cmnd->dsm.nr = cpu_to_le32(segments - 1); 659 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 660 661 req->special_vec.bv_page = virt_to_page(range); 662 req->special_vec.bv_offset = offset_in_page(range); 663 req->special_vec.bv_len = alloc_size; 664 req->rq_flags |= RQF_SPECIAL_PAYLOAD; 665 666 return BLK_STS_OK; 667 } 668 669 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, 670 struct request *req, struct nvme_command *cmnd) 671 { 672 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 673 return nvme_setup_discard(ns, req, cmnd); 674 675 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; 676 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); 677 cmnd->write_zeroes.slba = 678 cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); 679 cmnd->write_zeroes.length = 680 cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); 681 cmnd->write_zeroes.control = 0; 682 return BLK_STS_OK; 683 } 684 685 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, 686 struct request *req, struct nvme_command *cmnd) 687 { 688 struct nvme_ctrl *ctrl = ns->ctrl; 689 u16 control = 0; 690 u32 dsmgmt = 0; 691 692 if (req->cmd_flags & REQ_FUA) 693 control |= NVME_RW_FUA; 694 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) 695 control |= NVME_RW_LR; 696 697 if (req->cmd_flags & REQ_RAHEAD) 698 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 699 700 cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read); 701 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); 702 cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); 703 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); 704 705 if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams) 706 nvme_assign_write_stream(ctrl, req, &control, &dsmgmt); 707 708 if (ns->ms) { 709 /* 710 * If formated with metadata, the block layer always provides a 711 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else 712 * we enable the PRACT bit for protection information or set the 713 * namespace capacity to zero to prevent any I/O. 714 */ 715 if (!blk_integrity_rq(req)) { 716 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) 717 return BLK_STS_NOTSUPP; 718 control |= NVME_RW_PRINFO_PRACT; 719 } 720 721 switch (ns->pi_type) { 722 case NVME_NS_DPS_PI_TYPE3: 723 control |= NVME_RW_PRINFO_PRCHK_GUARD; 724 break; 725 case NVME_NS_DPS_PI_TYPE1: 726 case NVME_NS_DPS_PI_TYPE2: 727 control |= NVME_RW_PRINFO_PRCHK_GUARD | 728 NVME_RW_PRINFO_PRCHK_REF; 729 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); 730 break; 731 } 732 } 733 734 cmnd->rw.control = cpu_to_le16(control); 735 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 736 return 0; 737 } 738 739 void nvme_cleanup_cmd(struct request *req) 740 { 741 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { 742 struct nvme_ns *ns = req->rq_disk->private_data; 743 struct page *page = req->special_vec.bv_page; 744 745 if (page == ns->ctrl->discard_page) 746 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); 747 else 748 kfree(page_address(page) + req->special_vec.bv_offset); 749 } 750 } 751 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); 752 753 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, 754 struct nvme_command *cmd) 755 { 756 blk_status_t ret = BLK_STS_OK; 757 758 nvme_clear_nvme_request(req); 759 760 memset(cmd, 0, sizeof(*cmd)); 761 switch (req_op(req)) { 762 case REQ_OP_DRV_IN: 763 case REQ_OP_DRV_OUT: 764 memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); 765 break; 766 case REQ_OP_FLUSH: 767 nvme_setup_flush(ns, cmd); 768 break; 769 case REQ_OP_WRITE_ZEROES: 770 ret = nvme_setup_write_zeroes(ns, req, cmd); 771 break; 772 case REQ_OP_DISCARD: 773 ret = nvme_setup_discard(ns, req, cmd); 774 break; 775 case REQ_OP_READ: 776 case REQ_OP_WRITE: 777 ret = nvme_setup_rw(ns, req, cmd); 778 break; 779 default: 780 WARN_ON_ONCE(1); 781 return BLK_STS_IOERR; 782 } 783 784 cmd->common.command_id = req->tag; 785 trace_nvme_setup_cmd(req, cmd); 786 return ret; 787 } 788 EXPORT_SYMBOL_GPL(nvme_setup_cmd); 789 790 static void nvme_end_sync_rq(struct request *rq, blk_status_t error) 791 { 792 struct completion *waiting = rq->end_io_data; 793 794 rq->end_io_data = NULL; 795 complete(waiting); 796 } 797 798 static void nvme_execute_rq_polled(struct request_queue *q, 799 struct gendisk *bd_disk, struct request *rq, int at_head) 800 { 801 DECLARE_COMPLETION_ONSTACK(wait); 802 803 WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)); 804 805 rq->cmd_flags |= REQ_HIPRI; 806 rq->end_io_data = &wait; 807 blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq); 808 809 while (!completion_done(&wait)) { 810 blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true); 811 cond_resched(); 812 } 813 } 814 815 /* 816 * Returns 0 on success. If the result is negative, it's a Linux error code; 817 * if the result is positive, it's an NVM Express status code 818 */ 819 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 820 union nvme_result *result, void *buffer, unsigned bufflen, 821 unsigned timeout, int qid, int at_head, 822 blk_mq_req_flags_t flags, bool poll) 823 { 824 struct request *req; 825 int ret; 826 827 req = nvme_alloc_request(q, cmd, flags, qid); 828 if (IS_ERR(req)) 829 return PTR_ERR(req); 830 831 req->timeout = timeout ? timeout : ADMIN_TIMEOUT; 832 833 if (buffer && bufflen) { 834 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); 835 if (ret) 836 goto out; 837 } 838 839 if (poll) 840 nvme_execute_rq_polled(req->q, NULL, req, at_head); 841 else 842 blk_execute_rq(req->q, NULL, req, at_head); 843 if (result) 844 *result = nvme_req(req)->result; 845 if (nvme_req(req)->flags & NVME_REQ_CANCELLED) 846 ret = -EINTR; 847 else 848 ret = nvme_req(req)->status; 849 out: 850 blk_mq_free_request(req); 851 return ret; 852 } 853 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd); 854 855 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 856 void *buffer, unsigned bufflen) 857 { 858 return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0, 859 NVME_QID_ANY, 0, 0, false); 860 } 861 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); 862 863 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf, 864 unsigned len, u32 seed, bool write) 865 { 866 struct bio_integrity_payload *bip; 867 int ret = -ENOMEM; 868 void *buf; 869 870 buf = kmalloc(len, GFP_KERNEL); 871 if (!buf) 872 goto out; 873 874 ret = -EFAULT; 875 if (write && copy_from_user(buf, ubuf, len)) 876 goto out_free_meta; 877 878 bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); 879 if (IS_ERR(bip)) { 880 ret = PTR_ERR(bip); 881 goto out_free_meta; 882 } 883 884 bip->bip_iter.bi_size = len; 885 bip->bip_iter.bi_sector = seed; 886 ret = bio_integrity_add_page(bio, virt_to_page(buf), len, 887 offset_in_page(buf)); 888 if (ret == len) 889 return buf; 890 ret = -ENOMEM; 891 out_free_meta: 892 kfree(buf); 893 out: 894 return ERR_PTR(ret); 895 } 896 897 static int nvme_submit_user_cmd(struct request_queue *q, 898 struct nvme_command *cmd, void __user *ubuffer, 899 unsigned bufflen, void __user *meta_buffer, unsigned meta_len, 900 u32 meta_seed, u64 *result, unsigned timeout) 901 { 902 bool write = nvme_is_write(cmd); 903 struct nvme_ns *ns = q->queuedata; 904 struct gendisk *disk = ns ? ns->disk : NULL; 905 struct request *req; 906 struct bio *bio = NULL; 907 void *meta = NULL; 908 int ret; 909 910 req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY); 911 if (IS_ERR(req)) 912 return PTR_ERR(req); 913 914 req->timeout = timeout ? timeout : ADMIN_TIMEOUT; 915 nvme_req(req)->flags |= NVME_REQ_USERCMD; 916 917 if (ubuffer && bufflen) { 918 ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, 919 GFP_KERNEL); 920 if (ret) 921 goto out; 922 bio = req->bio; 923 bio->bi_disk = disk; 924 if (disk && meta_buffer && meta_len) { 925 meta = nvme_add_user_metadata(bio, meta_buffer, meta_len, 926 meta_seed, write); 927 if (IS_ERR(meta)) { 928 ret = PTR_ERR(meta); 929 goto out_unmap; 930 } 931 req->cmd_flags |= REQ_INTEGRITY; 932 } 933 } 934 935 blk_execute_rq(req->q, disk, req, 0); 936 if (nvme_req(req)->flags & NVME_REQ_CANCELLED) 937 ret = -EINTR; 938 else 939 ret = nvme_req(req)->status; 940 if (result) 941 *result = le64_to_cpu(nvme_req(req)->result.u64); 942 if (meta && !ret && !write) { 943 if (copy_to_user(meta_buffer, meta, meta_len)) 944 ret = -EFAULT; 945 } 946 kfree(meta); 947 out_unmap: 948 if (bio) 949 blk_rq_unmap_user(bio); 950 out: 951 blk_mq_free_request(req); 952 return ret; 953 } 954 955 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) 956 { 957 struct nvme_ctrl *ctrl = rq->end_io_data; 958 unsigned long flags; 959 bool startka = false; 960 961 blk_mq_free_request(rq); 962 963 if (status) { 964 dev_err(ctrl->device, 965 "failed nvme_keep_alive_end_io error=%d\n", 966 status); 967 return; 968 } 969 970 ctrl->comp_seen = false; 971 spin_lock_irqsave(&ctrl->lock, flags); 972 if (ctrl->state == NVME_CTRL_LIVE || 973 ctrl->state == NVME_CTRL_CONNECTING) 974 startka = true; 975 spin_unlock_irqrestore(&ctrl->lock, flags); 976 if (startka) 977 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); 978 } 979 980 static int nvme_keep_alive(struct nvme_ctrl *ctrl) 981 { 982 struct request *rq; 983 984 rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED, 985 NVME_QID_ANY); 986 if (IS_ERR(rq)) 987 return PTR_ERR(rq); 988 989 rq->timeout = ctrl->kato * HZ; 990 rq->end_io_data = ctrl; 991 992 blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io); 993 994 return 0; 995 } 996 997 static void nvme_keep_alive_work(struct work_struct *work) 998 { 999 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 1000 struct nvme_ctrl, ka_work); 1001 bool comp_seen = ctrl->comp_seen; 1002 1003 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { 1004 dev_dbg(ctrl->device, 1005 "reschedule traffic based keep-alive timer\n"); 1006 ctrl->comp_seen = false; 1007 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); 1008 return; 1009 } 1010 1011 if (nvme_keep_alive(ctrl)) { 1012 /* allocation failure, reset the controller */ 1013 dev_err(ctrl->device, "keep-alive failed\n"); 1014 nvme_reset_ctrl(ctrl); 1015 return; 1016 } 1017 } 1018 1019 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) 1020 { 1021 if (unlikely(ctrl->kato == 0)) 1022 return; 1023 1024 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); 1025 } 1026 1027 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) 1028 { 1029 if (unlikely(ctrl->kato == 0)) 1030 return; 1031 1032 cancel_delayed_work_sync(&ctrl->ka_work); 1033 } 1034 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); 1035 1036 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) 1037 { 1038 struct nvme_command c = { }; 1039 int error; 1040 1041 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1042 c.identify.opcode = nvme_admin_identify; 1043 c.identify.cns = NVME_ID_CNS_CTRL; 1044 1045 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); 1046 if (!*id) 1047 return -ENOMEM; 1048 1049 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, 1050 sizeof(struct nvme_id_ctrl)); 1051 if (error) 1052 kfree(*id); 1053 return error; 1054 } 1055 1056 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, 1057 struct nvme_ns_ids *ids) 1058 { 1059 struct nvme_command c = { }; 1060 int status; 1061 void *data; 1062 int pos; 1063 int len; 1064 1065 c.identify.opcode = nvme_admin_identify; 1066 c.identify.nsid = cpu_to_le32(nsid); 1067 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; 1068 1069 data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 1070 if (!data) 1071 return -ENOMEM; 1072 1073 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, 1074 NVME_IDENTIFY_DATA_SIZE); 1075 if (status) 1076 goto free_data; 1077 1078 for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { 1079 struct nvme_ns_id_desc *cur = data + pos; 1080 1081 if (cur->nidl == 0) 1082 break; 1083 1084 switch (cur->nidt) { 1085 case NVME_NIDT_EUI64: 1086 if (cur->nidl != NVME_NIDT_EUI64_LEN) { 1087 dev_warn(ctrl->device, 1088 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n", 1089 cur->nidl); 1090 goto free_data; 1091 } 1092 len = NVME_NIDT_EUI64_LEN; 1093 memcpy(ids->eui64, data + pos + sizeof(*cur), len); 1094 break; 1095 case NVME_NIDT_NGUID: 1096 if (cur->nidl != NVME_NIDT_NGUID_LEN) { 1097 dev_warn(ctrl->device, 1098 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n", 1099 cur->nidl); 1100 goto free_data; 1101 } 1102 len = NVME_NIDT_NGUID_LEN; 1103 memcpy(ids->nguid, data + pos + sizeof(*cur), len); 1104 break; 1105 case NVME_NIDT_UUID: 1106 if (cur->nidl != NVME_NIDT_UUID_LEN) { 1107 dev_warn(ctrl->device, 1108 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n", 1109 cur->nidl); 1110 goto free_data; 1111 } 1112 len = NVME_NIDT_UUID_LEN; 1113 uuid_copy(&ids->uuid, data + pos + sizeof(*cur)); 1114 break; 1115 default: 1116 /* Skip unknown types */ 1117 len = cur->nidl; 1118 break; 1119 } 1120 1121 len += sizeof(*cur); 1122 } 1123 free_data: 1124 kfree(data); 1125 return status; 1126 } 1127 1128 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list) 1129 { 1130 struct nvme_command c = { }; 1131 1132 c.identify.opcode = nvme_admin_identify; 1133 c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST; 1134 c.identify.nsid = cpu_to_le32(nsid); 1135 return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 1136 NVME_IDENTIFY_DATA_SIZE); 1137 } 1138 1139 static int nvme_identify_ns(struct nvme_ctrl *ctrl, 1140 unsigned nsid, struct nvme_id_ns **id) 1141 { 1142 struct nvme_command c = { }; 1143 int error; 1144 1145 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1146 c.identify.opcode = nvme_admin_identify; 1147 c.identify.nsid = cpu_to_le32(nsid); 1148 c.identify.cns = NVME_ID_CNS_NS; 1149 1150 *id = kmalloc(sizeof(**id), GFP_KERNEL); 1151 if (!*id) 1152 return -ENOMEM; 1153 1154 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); 1155 if (error) { 1156 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); 1157 kfree(*id); 1158 } 1159 1160 return error; 1161 } 1162 1163 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, 1164 unsigned int dword11, void *buffer, size_t buflen, u32 *result) 1165 { 1166 struct nvme_command c; 1167 union nvme_result res; 1168 int ret; 1169 1170 memset(&c, 0, sizeof(c)); 1171 c.features.opcode = op; 1172 c.features.fid = cpu_to_le32(fid); 1173 c.features.dword11 = cpu_to_le32(dword11); 1174 1175 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, 1176 buffer, buflen, 0, NVME_QID_ANY, 0, 0, false); 1177 if (ret >= 0 && result) 1178 *result = le32_to_cpu(res.u32); 1179 return ret; 1180 } 1181 1182 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, 1183 unsigned int dword11, void *buffer, size_t buflen, 1184 u32 *result) 1185 { 1186 return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, 1187 buflen, result); 1188 } 1189 EXPORT_SYMBOL_GPL(nvme_set_features); 1190 1191 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, 1192 unsigned int dword11, void *buffer, size_t buflen, 1193 u32 *result) 1194 { 1195 return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, 1196 buflen, result); 1197 } 1198 EXPORT_SYMBOL_GPL(nvme_get_features); 1199 1200 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) 1201 { 1202 u32 q_count = (*count - 1) | ((*count - 1) << 16); 1203 u32 result; 1204 int status, nr_io_queues; 1205 1206 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, 1207 &result); 1208 if (status < 0) 1209 return status; 1210 1211 /* 1212 * Degraded controllers might return an error when setting the queue 1213 * count. We still want to be able to bring them online and offer 1214 * access to the admin queue, as that might be only way to fix them up. 1215 */ 1216 if (status > 0) { 1217 dev_err(ctrl->device, "Could not set queue count (%d)\n", status); 1218 *count = 0; 1219 } else { 1220 nr_io_queues = min(result & 0xffff, result >> 16) + 1; 1221 *count = min(*count, nr_io_queues); 1222 } 1223 1224 return 0; 1225 } 1226 EXPORT_SYMBOL_GPL(nvme_set_queue_count); 1227 1228 #define NVME_AEN_SUPPORTED \ 1229 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \ 1230 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE) 1231 1232 static void nvme_enable_aen(struct nvme_ctrl *ctrl) 1233 { 1234 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; 1235 int status; 1236 1237 if (!supported_aens) 1238 return; 1239 1240 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, 1241 NULL, 0, &result); 1242 if (status) 1243 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", 1244 supported_aens); 1245 1246 queue_work(nvme_wq, &ctrl->async_event_work); 1247 } 1248 1249 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) 1250 { 1251 struct nvme_user_io io; 1252 struct nvme_command c; 1253 unsigned length, meta_len; 1254 void __user *metadata; 1255 1256 if (copy_from_user(&io, uio, sizeof(io))) 1257 return -EFAULT; 1258 if (io.flags) 1259 return -EINVAL; 1260 1261 switch (io.opcode) { 1262 case nvme_cmd_write: 1263 case nvme_cmd_read: 1264 case nvme_cmd_compare: 1265 break; 1266 default: 1267 return -EINVAL; 1268 } 1269 1270 length = (io.nblocks + 1) << ns->lba_shift; 1271 meta_len = (io.nblocks + 1) * ns->ms; 1272 metadata = (void __user *)(uintptr_t)io.metadata; 1273 1274 if (ns->ext) { 1275 length += meta_len; 1276 meta_len = 0; 1277 } else if (meta_len) { 1278 if ((io.metadata & 3) || !io.metadata) 1279 return -EINVAL; 1280 } 1281 1282 memset(&c, 0, sizeof(c)); 1283 c.rw.opcode = io.opcode; 1284 c.rw.flags = io.flags; 1285 c.rw.nsid = cpu_to_le32(ns->head->ns_id); 1286 c.rw.slba = cpu_to_le64(io.slba); 1287 c.rw.length = cpu_to_le16(io.nblocks); 1288 c.rw.control = cpu_to_le16(io.control); 1289 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); 1290 c.rw.reftag = cpu_to_le32(io.reftag); 1291 c.rw.apptag = cpu_to_le16(io.apptag); 1292 c.rw.appmask = cpu_to_le16(io.appmask); 1293 1294 return nvme_submit_user_cmd(ns->queue, &c, 1295 (void __user *)(uintptr_t)io.addr, length, 1296 metadata, meta_len, lower_32_bits(io.slba), NULL, 0); 1297 } 1298 1299 static u32 nvme_known_admin_effects(u8 opcode) 1300 { 1301 switch (opcode) { 1302 case nvme_admin_format_nvm: 1303 return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC | 1304 NVME_CMD_EFFECTS_CSE_MASK; 1305 case nvme_admin_sanitize_nvm: 1306 return NVME_CMD_EFFECTS_CSE_MASK; 1307 default: 1308 break; 1309 } 1310 return 0; 1311 } 1312 1313 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 1314 u8 opcode) 1315 { 1316 u32 effects = 0; 1317 1318 if (ns) { 1319 if (ctrl->effects) 1320 effects = le32_to_cpu(ctrl->effects->iocs[opcode]); 1321 if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) 1322 dev_warn(ctrl->device, 1323 "IO command:%02x has unhandled effects:%08x\n", 1324 opcode, effects); 1325 return 0; 1326 } 1327 1328 if (ctrl->effects) 1329 effects = le32_to_cpu(ctrl->effects->acs[opcode]); 1330 effects |= nvme_known_admin_effects(opcode); 1331 1332 /* 1333 * For simplicity, IO to all namespaces is quiesced even if the command 1334 * effects say only one namespace is affected. 1335 */ 1336 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1337 mutex_lock(&ctrl->scan_lock); 1338 mutex_lock(&ctrl->subsys->lock); 1339 nvme_mpath_start_freeze(ctrl->subsys); 1340 nvme_mpath_wait_freeze(ctrl->subsys); 1341 nvme_start_freeze(ctrl); 1342 nvme_wait_freeze(ctrl); 1343 } 1344 return effects; 1345 } 1346 1347 static void nvme_update_formats(struct nvme_ctrl *ctrl) 1348 { 1349 struct nvme_ns *ns; 1350 1351 down_read(&ctrl->namespaces_rwsem); 1352 list_for_each_entry(ns, &ctrl->namespaces, list) 1353 if (ns->disk && nvme_revalidate_disk(ns->disk)) 1354 nvme_set_queue_dying(ns); 1355 up_read(&ctrl->namespaces_rwsem); 1356 } 1357 1358 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) 1359 { 1360 /* 1361 * Revalidate LBA changes prior to unfreezing. This is necessary to 1362 * prevent memory corruption if a logical block size was changed by 1363 * this command. 1364 */ 1365 if (effects & NVME_CMD_EFFECTS_LBCC) 1366 nvme_update_formats(ctrl); 1367 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1368 nvme_unfreeze(ctrl); 1369 nvme_mpath_unfreeze(ctrl->subsys); 1370 mutex_unlock(&ctrl->subsys->lock); 1371 nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); 1372 mutex_unlock(&ctrl->scan_lock); 1373 } 1374 if (effects & NVME_CMD_EFFECTS_CCC) 1375 nvme_init_identify(ctrl); 1376 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) 1377 nvme_queue_scan(ctrl); 1378 } 1379 1380 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 1381 struct nvme_passthru_cmd __user *ucmd) 1382 { 1383 struct nvme_passthru_cmd cmd; 1384 struct nvme_command c; 1385 unsigned timeout = 0; 1386 u32 effects; 1387 u64 result; 1388 int status; 1389 1390 if (!capable(CAP_SYS_ADMIN)) 1391 return -EACCES; 1392 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) 1393 return -EFAULT; 1394 if (cmd.flags) 1395 return -EINVAL; 1396 1397 memset(&c, 0, sizeof(c)); 1398 c.common.opcode = cmd.opcode; 1399 c.common.flags = cmd.flags; 1400 c.common.nsid = cpu_to_le32(cmd.nsid); 1401 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); 1402 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); 1403 c.common.cdw10 = cpu_to_le32(cmd.cdw10); 1404 c.common.cdw11 = cpu_to_le32(cmd.cdw11); 1405 c.common.cdw12 = cpu_to_le32(cmd.cdw12); 1406 c.common.cdw13 = cpu_to_le32(cmd.cdw13); 1407 c.common.cdw14 = cpu_to_le32(cmd.cdw14); 1408 c.common.cdw15 = cpu_to_le32(cmd.cdw15); 1409 1410 if (cmd.timeout_ms) 1411 timeout = msecs_to_jiffies(cmd.timeout_ms); 1412 1413 effects = nvme_passthru_start(ctrl, ns, cmd.opcode); 1414 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, 1415 (void __user *)(uintptr_t)cmd.addr, cmd.data_len, 1416 (void __user *)(uintptr_t)cmd.metadata, 1417 cmd.metadata_len, 0, &result, timeout); 1418 nvme_passthru_end(ctrl, effects); 1419 1420 if (status >= 0) { 1421 if (put_user(result, &ucmd->result)) 1422 return -EFAULT; 1423 } 1424 1425 return status; 1426 } 1427 1428 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 1429 struct nvme_passthru_cmd64 __user *ucmd) 1430 { 1431 struct nvme_passthru_cmd64 cmd; 1432 struct nvme_command c; 1433 unsigned timeout = 0; 1434 u32 effects; 1435 int status; 1436 1437 if (!capable(CAP_SYS_ADMIN)) 1438 return -EACCES; 1439 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) 1440 return -EFAULT; 1441 if (cmd.flags) 1442 return -EINVAL; 1443 1444 memset(&c, 0, sizeof(c)); 1445 c.common.opcode = cmd.opcode; 1446 c.common.flags = cmd.flags; 1447 c.common.nsid = cpu_to_le32(cmd.nsid); 1448 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); 1449 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); 1450 c.common.cdw10 = cpu_to_le32(cmd.cdw10); 1451 c.common.cdw11 = cpu_to_le32(cmd.cdw11); 1452 c.common.cdw12 = cpu_to_le32(cmd.cdw12); 1453 c.common.cdw13 = cpu_to_le32(cmd.cdw13); 1454 c.common.cdw14 = cpu_to_le32(cmd.cdw14); 1455 c.common.cdw15 = cpu_to_le32(cmd.cdw15); 1456 1457 if (cmd.timeout_ms) 1458 timeout = msecs_to_jiffies(cmd.timeout_ms); 1459 1460 effects = nvme_passthru_start(ctrl, ns, cmd.opcode); 1461 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, 1462 (void __user *)(uintptr_t)cmd.addr, cmd.data_len, 1463 (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len, 1464 0, &cmd.result, timeout); 1465 nvme_passthru_end(ctrl, effects); 1466 1467 if (status >= 0) { 1468 if (put_user(cmd.result, &ucmd->result)) 1469 return -EFAULT; 1470 } 1471 1472 return status; 1473 } 1474 1475 /* 1476 * Issue ioctl requests on the first available path. Note that unlike normal 1477 * block layer requests we will not retry failed request on another controller. 1478 */ 1479 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, 1480 struct nvme_ns_head **head, int *srcu_idx) 1481 { 1482 #ifdef CONFIG_NVME_MULTIPATH 1483 if (disk->fops == &nvme_ns_head_ops) { 1484 struct nvme_ns *ns; 1485 1486 *head = disk->private_data; 1487 *srcu_idx = srcu_read_lock(&(*head)->srcu); 1488 ns = nvme_find_path(*head); 1489 if (!ns) 1490 srcu_read_unlock(&(*head)->srcu, *srcu_idx); 1491 return ns; 1492 } 1493 #endif 1494 *head = NULL; 1495 *srcu_idx = -1; 1496 return disk->private_data; 1497 } 1498 1499 static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) 1500 { 1501 if (head) 1502 srcu_read_unlock(&head->srcu, idx); 1503 } 1504 1505 static bool is_ctrl_ioctl(unsigned int cmd) 1506 { 1507 if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD) 1508 return true; 1509 if (is_sed_ioctl(cmd)) 1510 return true; 1511 return false; 1512 } 1513 1514 static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, 1515 void __user *argp, 1516 struct nvme_ns_head *head, 1517 int srcu_idx) 1518 { 1519 struct nvme_ctrl *ctrl = ns->ctrl; 1520 int ret; 1521 1522 nvme_get_ctrl(ns->ctrl); 1523 nvme_put_ns_from_disk(head, srcu_idx); 1524 1525 switch (cmd) { 1526 case NVME_IOCTL_ADMIN_CMD: 1527 ret = nvme_user_cmd(ctrl, NULL, argp); 1528 break; 1529 case NVME_IOCTL_ADMIN64_CMD: 1530 ret = nvme_user_cmd64(ctrl, NULL, argp); 1531 break; 1532 default: 1533 ret = sed_ioctl(ctrl->opal_dev, cmd, argp); 1534 break; 1535 } 1536 nvme_put_ctrl(ctrl); 1537 return ret; 1538 } 1539 1540 static int nvme_ioctl(struct block_device *bdev, fmode_t mode, 1541 unsigned int cmd, unsigned long arg) 1542 { 1543 struct nvme_ns_head *head = NULL; 1544 void __user *argp = (void __user *)arg; 1545 struct nvme_ns *ns; 1546 int srcu_idx, ret; 1547 1548 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); 1549 if (unlikely(!ns)) 1550 return -EWOULDBLOCK; 1551 1552 /* 1553 * Handle ioctls that apply to the controller instead of the namespace 1554 * seperately and drop the ns SRCU reference early. This avoids a 1555 * deadlock when deleting namespaces using the passthrough interface. 1556 */ 1557 if (is_ctrl_ioctl(cmd)) 1558 return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); 1559 1560 switch (cmd) { 1561 case NVME_IOCTL_ID: 1562 force_successful_syscall_return(); 1563 ret = ns->head->ns_id; 1564 break; 1565 case NVME_IOCTL_IO_CMD: 1566 ret = nvme_user_cmd(ns->ctrl, ns, argp); 1567 break; 1568 case NVME_IOCTL_SUBMIT_IO: 1569 ret = nvme_submit_io(ns, argp); 1570 break; 1571 case NVME_IOCTL_IO64_CMD: 1572 ret = nvme_user_cmd64(ns->ctrl, ns, argp); 1573 break; 1574 default: 1575 if (ns->ndev) 1576 ret = nvme_nvm_ioctl(ns, cmd, arg); 1577 else 1578 ret = -ENOTTY; 1579 } 1580 1581 nvme_put_ns_from_disk(head, srcu_idx); 1582 return ret; 1583 } 1584 1585 static int nvme_open(struct block_device *bdev, fmode_t mode) 1586 { 1587 struct nvme_ns *ns = bdev->bd_disk->private_data; 1588 1589 #ifdef CONFIG_NVME_MULTIPATH 1590 /* should never be called due to GENHD_FL_HIDDEN */ 1591 if (WARN_ON_ONCE(ns->head->disk)) 1592 goto fail; 1593 #endif 1594 if (!kref_get_unless_zero(&ns->kref)) 1595 goto fail; 1596 if (!try_module_get(ns->ctrl->ops->module)) 1597 goto fail_put_ns; 1598 1599 return 0; 1600 1601 fail_put_ns: 1602 nvme_put_ns(ns); 1603 fail: 1604 return -ENXIO; 1605 } 1606 1607 static void nvme_release(struct gendisk *disk, fmode_t mode) 1608 { 1609 struct nvme_ns *ns = disk->private_data; 1610 1611 module_put(ns->ctrl->ops->module); 1612 nvme_put_ns(ns); 1613 } 1614 1615 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) 1616 { 1617 /* some standard values */ 1618 geo->heads = 1 << 6; 1619 geo->sectors = 1 << 5; 1620 geo->cylinders = get_capacity(bdev->bd_disk) >> 11; 1621 return 0; 1622 } 1623 1624 #ifdef CONFIG_BLK_DEV_INTEGRITY 1625 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) 1626 { 1627 struct blk_integrity integrity; 1628 1629 memset(&integrity, 0, sizeof(integrity)); 1630 switch (pi_type) { 1631 case NVME_NS_DPS_PI_TYPE3: 1632 integrity.profile = &t10_pi_type3_crc; 1633 integrity.tag_size = sizeof(u16) + sizeof(u32); 1634 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1635 break; 1636 case NVME_NS_DPS_PI_TYPE1: 1637 case NVME_NS_DPS_PI_TYPE2: 1638 integrity.profile = &t10_pi_type1_crc; 1639 integrity.tag_size = sizeof(u16); 1640 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1641 break; 1642 default: 1643 integrity.profile = NULL; 1644 break; 1645 } 1646 integrity.tuple_size = ms; 1647 blk_integrity_register(disk, &integrity); 1648 blk_queue_max_integrity_segments(disk->queue, 1); 1649 } 1650 #else 1651 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) 1652 { 1653 } 1654 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 1655 1656 static void nvme_set_chunk_size(struct nvme_ns *ns) 1657 { 1658 u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob); 1659 blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size)); 1660 } 1661 1662 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) 1663 { 1664 struct nvme_ctrl *ctrl = ns->ctrl; 1665 struct request_queue *queue = disk->queue; 1666 u32 size = queue_logical_block_size(queue); 1667 1668 if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) { 1669 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue); 1670 return; 1671 } 1672 1673 if (ctrl->nr_streams && ns->sws && ns->sgs) 1674 size *= ns->sws * ns->sgs; 1675 1676 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < 1677 NVME_DSM_MAX_RANGES); 1678 1679 queue->limits.discard_alignment = 0; 1680 queue->limits.discard_granularity = size; 1681 1682 /* If discard is already enabled, don't reset queue limits */ 1683 if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue)) 1684 return; 1685 1686 blk_queue_max_discard_sectors(queue, UINT_MAX); 1687 blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES); 1688 1689 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 1690 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); 1691 } 1692 1693 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) 1694 { 1695 u64 max_blocks; 1696 1697 if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || 1698 (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) 1699 return; 1700 /* 1701 * Even though NVMe spec explicitly states that MDTS is not 1702 * applicable to the write-zeroes:- "The restriction does not apply to 1703 * commands that do not transfer data between the host and the 1704 * controller (e.g., Write Uncorrectable ro Write Zeroes command).". 1705 * In order to be more cautious use controller's max_hw_sectors value 1706 * to configure the maximum sectors for the write-zeroes which is 1707 * configured based on the controller's MDTS field in the 1708 * nvme_init_identify() if available. 1709 */ 1710 if (ns->ctrl->max_hw_sectors == UINT_MAX) 1711 max_blocks = (u64)USHRT_MAX + 1; 1712 else 1713 max_blocks = ns->ctrl->max_hw_sectors + 1; 1714 1715 blk_queue_max_write_zeroes_sectors(disk->queue, 1716 nvme_lba_to_sect(ns, max_blocks)); 1717 } 1718 1719 static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, 1720 struct nvme_id_ns *id, struct nvme_ns_ids *ids) 1721 { 1722 int ret = 0; 1723 1724 memset(ids, 0, sizeof(*ids)); 1725 1726 if (ctrl->vs >= NVME_VS(1, 1, 0)) 1727 memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); 1728 if (ctrl->vs >= NVME_VS(1, 2, 0)) 1729 memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); 1730 if (ctrl->vs >= NVME_VS(1, 3, 0)) { 1731 /* Don't treat error as fatal we potentially 1732 * already have a NGUID or EUI-64 1733 */ 1734 ret = nvme_identify_ns_descs(ctrl, nsid, ids); 1735 if (ret) 1736 dev_warn(ctrl->device, 1737 "Identify Descriptors failed (%d)\n", ret); 1738 if (ret > 0) 1739 ret = 0; 1740 } 1741 return ret; 1742 } 1743 1744 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) 1745 { 1746 return !uuid_is_null(&ids->uuid) || 1747 memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) || 1748 memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 1749 } 1750 1751 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) 1752 { 1753 return uuid_equal(&a->uuid, &b->uuid) && 1754 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && 1755 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0; 1756 } 1757 1758 static void nvme_update_disk_info(struct gendisk *disk, 1759 struct nvme_ns *ns, struct nvme_id_ns *id) 1760 { 1761 sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); 1762 unsigned short bs = 1 << ns->lba_shift; 1763 u32 atomic_bs, phys_bs, io_opt; 1764 1765 if (ns->lba_shift > PAGE_SHIFT) { 1766 /* unsupported block size, set capacity to 0 later */ 1767 bs = (1 << 9); 1768 } 1769 blk_mq_freeze_queue(disk->queue); 1770 blk_integrity_unregister(disk); 1771 1772 if (id->nabo == 0) { 1773 /* 1774 * Bit 1 indicates whether NAWUPF is defined for this namespace 1775 * and whether it should be used instead of AWUPF. If NAWUPF == 1776 * 0 then AWUPF must be used instead. 1777 */ 1778 if (id->nsfeat & (1 << 1) && id->nawupf) 1779 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; 1780 else 1781 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; 1782 } else { 1783 atomic_bs = bs; 1784 } 1785 phys_bs = bs; 1786 io_opt = bs; 1787 if (id->nsfeat & (1 << 4)) { 1788 /* NPWG = Namespace Preferred Write Granularity */ 1789 phys_bs *= 1 + le16_to_cpu(id->npwg); 1790 /* NOWS = Namespace Optimal Write Size */ 1791 io_opt *= 1 + le16_to_cpu(id->nows); 1792 } 1793 1794 blk_queue_logical_block_size(disk->queue, bs); 1795 /* 1796 * Linux filesystems assume writing a single physical block is 1797 * an atomic operation. Hence limit the physical block size to the 1798 * value of the Atomic Write Unit Power Fail parameter. 1799 */ 1800 blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); 1801 blk_queue_io_min(disk->queue, phys_bs); 1802 blk_queue_io_opt(disk->queue, io_opt); 1803 1804 if (ns->ms && !ns->ext && 1805 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1806 nvme_init_integrity(disk, ns->ms, ns->pi_type); 1807 if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) || 1808 ns->lba_shift > PAGE_SHIFT) 1809 capacity = 0; 1810 1811 set_capacity(disk, capacity); 1812 1813 nvme_config_discard(disk, ns); 1814 nvme_config_write_zeroes(disk, ns); 1815 1816 if (id->nsattr & (1 << 0)) 1817 set_disk_ro(disk, true); 1818 else 1819 set_disk_ro(disk, false); 1820 1821 blk_mq_unfreeze_queue(disk->queue); 1822 } 1823 1824 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) 1825 { 1826 struct nvme_ns *ns = disk->private_data; 1827 1828 /* 1829 * If identify namespace failed, use default 512 byte block size so 1830 * block layer can use before failing read/write for 0 capacity. 1831 */ 1832 ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds; 1833 if (ns->lba_shift == 0) 1834 ns->lba_shift = 9; 1835 ns->noiob = le16_to_cpu(id->noiob); 1836 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); 1837 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); 1838 /* the PI implementation requires metadata equal t10 pi tuple size */ 1839 if (ns->ms == sizeof(struct t10_pi_tuple)) 1840 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1841 else 1842 ns->pi_type = 0; 1843 1844 if (ns->noiob) 1845 nvme_set_chunk_size(ns); 1846 nvme_update_disk_info(disk, ns, id); 1847 #ifdef CONFIG_NVME_MULTIPATH 1848 if (ns->head->disk) { 1849 nvme_update_disk_info(ns->head->disk, ns, id); 1850 blk_queue_stack_limits(ns->head->disk->queue, ns->queue); 1851 revalidate_disk(ns->head->disk); 1852 } 1853 #endif 1854 } 1855 1856 static int nvme_revalidate_disk(struct gendisk *disk) 1857 { 1858 struct nvme_ns *ns = disk->private_data; 1859 struct nvme_ctrl *ctrl = ns->ctrl; 1860 struct nvme_id_ns *id; 1861 struct nvme_ns_ids ids; 1862 int ret = 0; 1863 1864 if (test_bit(NVME_NS_DEAD, &ns->flags)) { 1865 set_capacity(disk, 0); 1866 return -ENODEV; 1867 } 1868 1869 ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id); 1870 if (ret) 1871 goto out; 1872 1873 if (id->ncap == 0) { 1874 ret = -ENODEV; 1875 goto free_id; 1876 } 1877 1878 __nvme_revalidate_disk(disk, id); 1879 ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids); 1880 if (ret) 1881 goto free_id; 1882 1883 if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) { 1884 dev_err(ctrl->device, 1885 "identifiers changed for nsid %d\n", ns->head->ns_id); 1886 ret = -ENODEV; 1887 } 1888 1889 free_id: 1890 kfree(id); 1891 out: 1892 /* 1893 * Only fail the function if we got a fatal error back from the 1894 * device, otherwise ignore the error and just move on. 1895 */ 1896 if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR))) 1897 ret = 0; 1898 else if (ret > 0) 1899 ret = blk_status_to_errno(nvme_error_status(ret)); 1900 return ret; 1901 } 1902 1903 static char nvme_pr_type(enum pr_type type) 1904 { 1905 switch (type) { 1906 case PR_WRITE_EXCLUSIVE: 1907 return 1; 1908 case PR_EXCLUSIVE_ACCESS: 1909 return 2; 1910 case PR_WRITE_EXCLUSIVE_REG_ONLY: 1911 return 3; 1912 case PR_EXCLUSIVE_ACCESS_REG_ONLY: 1913 return 4; 1914 case PR_WRITE_EXCLUSIVE_ALL_REGS: 1915 return 5; 1916 case PR_EXCLUSIVE_ACCESS_ALL_REGS: 1917 return 6; 1918 default: 1919 return 0; 1920 } 1921 }; 1922 1923 static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 1924 u64 key, u64 sa_key, u8 op) 1925 { 1926 struct nvme_ns_head *head = NULL; 1927 struct nvme_ns *ns; 1928 struct nvme_command c; 1929 int srcu_idx, ret; 1930 u8 data[16] = { 0, }; 1931 1932 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); 1933 if (unlikely(!ns)) 1934 return -EWOULDBLOCK; 1935 1936 put_unaligned_le64(key, &data[0]); 1937 put_unaligned_le64(sa_key, &data[8]); 1938 1939 memset(&c, 0, sizeof(c)); 1940 c.common.opcode = op; 1941 c.common.nsid = cpu_to_le32(ns->head->ns_id); 1942 c.common.cdw10 = cpu_to_le32(cdw10); 1943 1944 ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16); 1945 nvme_put_ns_from_disk(head, srcu_idx); 1946 return ret; 1947 } 1948 1949 static int nvme_pr_register(struct block_device *bdev, u64 old, 1950 u64 new, unsigned flags) 1951 { 1952 u32 cdw10; 1953 1954 if (flags & ~PR_FL_IGNORE_KEY) 1955 return -EOPNOTSUPP; 1956 1957 cdw10 = old ? 2 : 0; 1958 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; 1959 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 1960 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 1961 } 1962 1963 static int nvme_pr_reserve(struct block_device *bdev, u64 key, 1964 enum pr_type type, unsigned flags) 1965 { 1966 u32 cdw10; 1967 1968 if (flags & ~PR_FL_IGNORE_KEY) 1969 return -EOPNOTSUPP; 1970 1971 cdw10 = nvme_pr_type(type) << 8; 1972 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); 1973 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); 1974 } 1975 1976 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, 1977 enum pr_type type, bool abort) 1978 { 1979 u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); 1980 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); 1981 } 1982 1983 static int nvme_pr_clear(struct block_device *bdev, u64 key) 1984 { 1985 u32 cdw10 = 1 | (key ? 1 << 3 : 0); 1986 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); 1987 } 1988 1989 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 1990 { 1991 u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); 1992 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 1993 } 1994 1995 static const struct pr_ops nvme_pr_ops = { 1996 .pr_register = nvme_pr_register, 1997 .pr_reserve = nvme_pr_reserve, 1998 .pr_release = nvme_pr_release, 1999 .pr_preempt = nvme_pr_preempt, 2000 .pr_clear = nvme_pr_clear, 2001 }; 2002 2003 #ifdef CONFIG_BLK_SED_OPAL 2004 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2005 bool send) 2006 { 2007 struct nvme_ctrl *ctrl = data; 2008 struct nvme_command cmd; 2009 2010 memset(&cmd, 0, sizeof(cmd)); 2011 if (send) 2012 cmd.common.opcode = nvme_admin_security_send; 2013 else 2014 cmd.common.opcode = nvme_admin_security_recv; 2015 cmd.common.nsid = 0; 2016 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2017 cmd.common.cdw11 = cpu_to_le32(len); 2018 2019 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 2020 ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false); 2021 } 2022 EXPORT_SYMBOL_GPL(nvme_sec_submit); 2023 #endif /* CONFIG_BLK_SED_OPAL */ 2024 2025 static const struct block_device_operations nvme_fops = { 2026 .owner = THIS_MODULE, 2027 .ioctl = nvme_ioctl, 2028 .compat_ioctl = nvme_ioctl, 2029 .open = nvme_open, 2030 .release = nvme_release, 2031 .getgeo = nvme_getgeo, 2032 .revalidate_disk= nvme_revalidate_disk, 2033 .pr_ops = &nvme_pr_ops, 2034 }; 2035 2036 #ifdef CONFIG_NVME_MULTIPATH 2037 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode) 2038 { 2039 struct nvme_ns_head *head = bdev->bd_disk->private_data; 2040 2041 if (!kref_get_unless_zero(&head->ref)) 2042 return -ENXIO; 2043 return 0; 2044 } 2045 2046 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) 2047 { 2048 nvme_put_ns_head(disk->private_data); 2049 } 2050 2051 const struct block_device_operations nvme_ns_head_ops = { 2052 .owner = THIS_MODULE, 2053 .open = nvme_ns_head_open, 2054 .release = nvme_ns_head_release, 2055 .ioctl = nvme_ioctl, 2056 .compat_ioctl = nvme_ioctl, 2057 .getgeo = nvme_getgeo, 2058 .pr_ops = &nvme_pr_ops, 2059 }; 2060 #endif /* CONFIG_NVME_MULTIPATH */ 2061 2062 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) 2063 { 2064 unsigned long timeout = 2065 ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; 2066 u32 csts, bit = enabled ? NVME_CSTS_RDY : 0; 2067 int ret; 2068 2069 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2070 if (csts == ~0) 2071 return -ENODEV; 2072 if ((csts & NVME_CSTS_RDY) == bit) 2073 break; 2074 2075 msleep(100); 2076 if (fatal_signal_pending(current)) 2077 return -EINTR; 2078 if (time_after(jiffies, timeout)) { 2079 dev_err(ctrl->device, 2080 "Device not ready; aborting %s\n", enabled ? 2081 "initialisation" : "reset"); 2082 return -ENODEV; 2083 } 2084 } 2085 2086 return ret; 2087 } 2088 2089 /* 2090 * If the device has been passed off to us in an enabled state, just clear 2091 * the enabled bit. The spec says we should set the 'shutdown notification 2092 * bits', but doing so may cause the device to complete commands to the 2093 * admin queue ... and we don't know what memory that might be pointing at! 2094 */ 2095 int nvme_disable_ctrl(struct nvme_ctrl *ctrl) 2096 { 2097 int ret; 2098 2099 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2100 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2101 2102 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2103 if (ret) 2104 return ret; 2105 2106 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2107 msleep(NVME_QUIRK_DELAY_AMOUNT); 2108 2109 return nvme_wait_ready(ctrl, ctrl->cap, false); 2110 } 2111 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2112 2113 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2114 { 2115 /* 2116 * Default to a 4K page size, with the intention to update this 2117 * path in the future to accomodate architectures with differing 2118 * kernel and IO page sizes. 2119 */ 2120 unsigned dev_page_min, page_shift = 12; 2121 int ret; 2122 2123 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2124 if (ret) { 2125 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2126 return ret; 2127 } 2128 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2129 2130 if (page_shift < dev_page_min) { 2131 dev_err(ctrl->device, 2132 "Minimum device page size %u too large for host (%u)\n", 2133 1 << dev_page_min, 1 << page_shift); 2134 return -ENODEV; 2135 } 2136 2137 ctrl->page_size = 1 << page_shift; 2138 2139 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2140 ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; 2141 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2142 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2143 ctrl->ctrl_config |= NVME_CC_ENABLE; 2144 2145 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2146 if (ret) 2147 return ret; 2148 return nvme_wait_ready(ctrl, ctrl->cap, true); 2149 } 2150 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2151 2152 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) 2153 { 2154 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); 2155 u32 csts; 2156 int ret; 2157 2158 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2159 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2160 2161 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2162 if (ret) 2163 return ret; 2164 2165 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2166 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) 2167 break; 2168 2169 msleep(100); 2170 if (fatal_signal_pending(current)) 2171 return -EINTR; 2172 if (time_after(jiffies, timeout)) { 2173 dev_err(ctrl->device, 2174 "Device shutdown incomplete; abort shutdown\n"); 2175 return -ENODEV; 2176 } 2177 } 2178 2179 return ret; 2180 } 2181 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); 2182 2183 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, 2184 struct request_queue *q) 2185 { 2186 bool vwc = false; 2187 2188 if (ctrl->max_hw_sectors) { 2189 u32 max_segments = 2190 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; 2191 2192 max_segments = min_not_zero(max_segments, ctrl->max_segments); 2193 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 2194 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 2195 } 2196 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && 2197 is_power_of_2(ctrl->max_hw_sectors)) 2198 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); 2199 blk_queue_virt_boundary(q, ctrl->page_size - 1); 2200 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) 2201 vwc = true; 2202 blk_queue_write_cache(q, vwc, vwc); 2203 } 2204 2205 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2206 { 2207 __le64 ts; 2208 int ret; 2209 2210 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2211 return 0; 2212 2213 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2214 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2215 NULL); 2216 if (ret) 2217 dev_warn_once(ctrl->device, 2218 "could not set timestamp (%d)\n", ret); 2219 return ret; 2220 } 2221 2222 static int nvme_configure_acre(struct nvme_ctrl *ctrl) 2223 { 2224 struct nvme_feat_host_behavior *host; 2225 int ret; 2226 2227 /* Don't bother enabling the feature if retry delay is not reported */ 2228 if (!ctrl->crdt[0]) 2229 return 0; 2230 2231 host = kzalloc(sizeof(*host), GFP_KERNEL); 2232 if (!host) 2233 return 0; 2234 2235 host->acre = NVME_ENABLE_ACRE; 2236 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2237 host, sizeof(*host), NULL); 2238 kfree(host); 2239 return ret; 2240 } 2241 2242 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2243 { 2244 /* 2245 * APST (Autonomous Power State Transition) lets us program a 2246 * table of power state transitions that the controller will 2247 * perform automatically. We configure it with a simple 2248 * heuristic: we are willing to spend at most 2% of the time 2249 * transitioning between power states. Therefore, when running 2250 * in any given state, we will enter the next lower-power 2251 * non-operational state after waiting 50 * (enlat + exlat) 2252 * microseconds, as long as that state's exit latency is under 2253 * the requested maximum latency. 2254 * 2255 * We will not autonomously enter any non-operational state for 2256 * which the total latency exceeds ps_max_latency_us. Users 2257 * can set ps_max_latency_us to zero to turn off APST. 2258 */ 2259 2260 unsigned apste; 2261 struct nvme_feat_auto_pst *table; 2262 u64 max_lat_us = 0; 2263 int max_ps = -1; 2264 int ret; 2265 2266 /* 2267 * If APST isn't supported or if we haven't been initialized yet, 2268 * then don't do anything. 2269 */ 2270 if (!ctrl->apsta) 2271 return 0; 2272 2273 if (ctrl->npss > 31) { 2274 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2275 return 0; 2276 } 2277 2278 table = kzalloc(sizeof(*table), GFP_KERNEL); 2279 if (!table) 2280 return 0; 2281 2282 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2283 /* Turn off APST. */ 2284 apste = 0; 2285 dev_dbg(ctrl->device, "APST disabled\n"); 2286 } else { 2287 __le64 target = cpu_to_le64(0); 2288 int state; 2289 2290 /* 2291 * Walk through all states from lowest- to highest-power. 2292 * According to the spec, lower-numbered states use more 2293 * power. NPSS, despite the name, is the index of the 2294 * lowest-power state, not the number of states. 2295 */ 2296 for (state = (int)ctrl->npss; state >= 0; state--) { 2297 u64 total_latency_us, exit_latency_us, transition_ms; 2298 2299 if (target) 2300 table->entries[state] = target; 2301 2302 /* 2303 * Don't allow transitions to the deepest state 2304 * if it's quirked off. 2305 */ 2306 if (state == ctrl->npss && 2307 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2308 continue; 2309 2310 /* 2311 * Is this state a useful non-operational state for 2312 * higher-power states to autonomously transition to? 2313 */ 2314 if (!(ctrl->psd[state].flags & 2315 NVME_PS_FLAGS_NON_OP_STATE)) 2316 continue; 2317 2318 exit_latency_us = 2319 (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2320 if (exit_latency_us > ctrl->ps_max_latency_us) 2321 continue; 2322 2323 total_latency_us = 2324 exit_latency_us + 2325 le32_to_cpu(ctrl->psd[state].entry_lat); 2326 2327 /* 2328 * This state is good. Use it as the APST idle 2329 * target for higher power states. 2330 */ 2331 transition_ms = total_latency_us + 19; 2332 do_div(transition_ms, 20); 2333 if (transition_ms > (1 << 24) - 1) 2334 transition_ms = (1 << 24) - 1; 2335 2336 target = cpu_to_le64((state << 3) | 2337 (transition_ms << 8)); 2338 2339 if (max_ps == -1) 2340 max_ps = state; 2341 2342 if (total_latency_us > max_lat_us) 2343 max_lat_us = total_latency_us; 2344 } 2345 2346 apste = 1; 2347 2348 if (max_ps == -1) { 2349 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2350 } else { 2351 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2352 max_ps, max_lat_us, (int)sizeof(*table), table); 2353 } 2354 } 2355 2356 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2357 table, sizeof(*table), NULL); 2358 if (ret) 2359 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2360 2361 kfree(table); 2362 return ret; 2363 } 2364 2365 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2366 { 2367 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2368 u64 latency; 2369 2370 switch (val) { 2371 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2372 case PM_QOS_LATENCY_ANY: 2373 latency = U64_MAX; 2374 break; 2375 2376 default: 2377 latency = val; 2378 } 2379 2380 if (ctrl->ps_max_latency_us != latency) { 2381 ctrl->ps_max_latency_us = latency; 2382 nvme_configure_apst(ctrl); 2383 } 2384 } 2385 2386 struct nvme_core_quirk_entry { 2387 /* 2388 * NVMe model and firmware strings are padded with spaces. For 2389 * simplicity, strings in the quirk table are padded with NULLs 2390 * instead. 2391 */ 2392 u16 vid; 2393 const char *mn; 2394 const char *fr; 2395 unsigned long quirks; 2396 }; 2397 2398 static const struct nvme_core_quirk_entry core_quirks[] = { 2399 { 2400 /* 2401 * This Toshiba device seems to die using any APST states. See: 2402 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2403 */ 2404 .vid = 0x1179, 2405 .mn = "THNSF5256GPUK TOSHIBA", 2406 .quirks = NVME_QUIRK_NO_APST, 2407 }, 2408 { 2409 /* 2410 * This LiteON CL1-3D*-Q11 firmware version has a race 2411 * condition associated with actions related to suspend to idle 2412 * LiteON has resolved the problem in future firmware 2413 */ 2414 .vid = 0x14a4, 2415 .fr = "22301111", 2416 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2417 } 2418 }; 2419 2420 /* match is null-terminated but idstr is space-padded. */ 2421 static bool string_matches(const char *idstr, const char *match, size_t len) 2422 { 2423 size_t matchlen; 2424 2425 if (!match) 2426 return true; 2427 2428 matchlen = strlen(match); 2429 WARN_ON_ONCE(matchlen > len); 2430 2431 if (memcmp(idstr, match, matchlen)) 2432 return false; 2433 2434 for (; matchlen < len; matchlen++) 2435 if (idstr[matchlen] != ' ') 2436 return false; 2437 2438 return true; 2439 } 2440 2441 static bool quirk_matches(const struct nvme_id_ctrl *id, 2442 const struct nvme_core_quirk_entry *q) 2443 { 2444 return q->vid == le16_to_cpu(id->vid) && 2445 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2446 string_matches(id->fr, q->fr, sizeof(id->fr)); 2447 } 2448 2449 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2450 struct nvme_id_ctrl *id) 2451 { 2452 size_t nqnlen; 2453 int off; 2454 2455 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2456 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2457 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2458 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2459 return; 2460 } 2461 2462 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2463 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2464 } 2465 2466 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2467 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2468 "nqn.2014.08.org.nvmexpress:%04x%04x", 2469 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2470 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2471 off += sizeof(id->sn); 2472 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2473 off += sizeof(id->mn); 2474 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2475 } 2476 2477 static void nvme_release_subsystem(struct device *dev) 2478 { 2479 struct nvme_subsystem *subsys = 2480 container_of(dev, struct nvme_subsystem, dev); 2481 2482 if (subsys->instance >= 0) 2483 ida_simple_remove(&nvme_instance_ida, subsys->instance); 2484 kfree(subsys); 2485 } 2486 2487 static void nvme_destroy_subsystem(struct kref *ref) 2488 { 2489 struct nvme_subsystem *subsys = 2490 container_of(ref, struct nvme_subsystem, ref); 2491 2492 mutex_lock(&nvme_subsystems_lock); 2493 list_del(&subsys->entry); 2494 mutex_unlock(&nvme_subsystems_lock); 2495 2496 ida_destroy(&subsys->ns_ida); 2497 device_del(&subsys->dev); 2498 put_device(&subsys->dev); 2499 } 2500 2501 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2502 { 2503 kref_put(&subsys->ref, nvme_destroy_subsystem); 2504 } 2505 2506 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2507 { 2508 struct nvme_subsystem *subsys; 2509 2510 lockdep_assert_held(&nvme_subsystems_lock); 2511 2512 /* 2513 * Fail matches for discovery subsystems. This results 2514 * in each discovery controller bound to a unique subsystem. 2515 * This avoids issues with validating controller values 2516 * that can only be true when there is a single unique subsystem. 2517 * There may be multiple and completely independent entities 2518 * that provide discovery controllers. 2519 */ 2520 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2521 return NULL; 2522 2523 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2524 if (strcmp(subsys->subnqn, subsysnqn)) 2525 continue; 2526 if (!kref_get_unless_zero(&subsys->ref)) 2527 continue; 2528 return subsys; 2529 } 2530 2531 return NULL; 2532 } 2533 2534 #define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2535 struct device_attribute subsys_attr_##_name = \ 2536 __ATTR(_name, _mode, _show, NULL) 2537 2538 static ssize_t nvme_subsys_show_nqn(struct device *dev, 2539 struct device_attribute *attr, 2540 char *buf) 2541 { 2542 struct nvme_subsystem *subsys = 2543 container_of(dev, struct nvme_subsystem, dev); 2544 2545 return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); 2546 } 2547 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2548 2549 #define nvme_subsys_show_str_function(field) \ 2550 static ssize_t subsys_##field##_show(struct device *dev, \ 2551 struct device_attribute *attr, char *buf) \ 2552 { \ 2553 struct nvme_subsystem *subsys = \ 2554 container_of(dev, struct nvme_subsystem, dev); \ 2555 return sprintf(buf, "%.*s\n", \ 2556 (int)sizeof(subsys->field), subsys->field); \ 2557 } \ 2558 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2559 2560 nvme_subsys_show_str_function(model); 2561 nvme_subsys_show_str_function(serial); 2562 nvme_subsys_show_str_function(firmware_rev); 2563 2564 static struct attribute *nvme_subsys_attrs[] = { 2565 &subsys_attr_model.attr, 2566 &subsys_attr_serial.attr, 2567 &subsys_attr_firmware_rev.attr, 2568 &subsys_attr_subsysnqn.attr, 2569 #ifdef CONFIG_NVME_MULTIPATH 2570 &subsys_attr_iopolicy.attr, 2571 #endif 2572 NULL, 2573 }; 2574 2575 static struct attribute_group nvme_subsys_attrs_group = { 2576 .attrs = nvme_subsys_attrs, 2577 }; 2578 2579 static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2580 &nvme_subsys_attrs_group, 2581 NULL, 2582 }; 2583 2584 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2585 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2586 { 2587 struct nvme_ctrl *tmp; 2588 2589 lockdep_assert_held(&nvme_subsystems_lock); 2590 2591 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2592 if (tmp->state == NVME_CTRL_DELETING || 2593 tmp->state == NVME_CTRL_DEAD) 2594 continue; 2595 2596 if (tmp->cntlid == ctrl->cntlid) { 2597 dev_err(ctrl->device, 2598 "Duplicate cntlid %u with %s, rejecting\n", 2599 ctrl->cntlid, dev_name(tmp->device)); 2600 return false; 2601 } 2602 2603 if ((id->cmic & (1 << 1)) || 2604 (ctrl->opts && ctrl->opts->discovery_nqn)) 2605 continue; 2606 2607 dev_err(ctrl->device, 2608 "Subsystem does not support multiple controllers\n"); 2609 return false; 2610 } 2611 2612 return true; 2613 } 2614 2615 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2616 { 2617 struct nvme_subsystem *subsys, *found; 2618 int ret; 2619 2620 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2621 if (!subsys) 2622 return -ENOMEM; 2623 2624 subsys->instance = -1; 2625 mutex_init(&subsys->lock); 2626 kref_init(&subsys->ref); 2627 INIT_LIST_HEAD(&subsys->ctrls); 2628 INIT_LIST_HEAD(&subsys->nsheads); 2629 nvme_init_subnqn(subsys, ctrl, id); 2630 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2631 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2632 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); 2633 subsys->vendor_id = le16_to_cpu(id->vid); 2634 subsys->cmic = id->cmic; 2635 subsys->awupf = le16_to_cpu(id->awupf); 2636 #ifdef CONFIG_NVME_MULTIPATH 2637 subsys->iopolicy = NVME_IOPOLICY_NUMA; 2638 #endif 2639 2640 subsys->dev.class = nvme_subsys_class; 2641 subsys->dev.release = nvme_release_subsystem; 2642 subsys->dev.groups = nvme_subsys_attrs_groups; 2643 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 2644 device_initialize(&subsys->dev); 2645 2646 mutex_lock(&nvme_subsystems_lock); 2647 found = __nvme_find_get_subsystem(subsys->subnqn); 2648 if (found) { 2649 put_device(&subsys->dev); 2650 subsys = found; 2651 2652 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2653 ret = -EINVAL; 2654 goto out_put_subsystem; 2655 } 2656 } else { 2657 ret = device_add(&subsys->dev); 2658 if (ret) { 2659 dev_err(ctrl->device, 2660 "failed to register subsystem device.\n"); 2661 put_device(&subsys->dev); 2662 goto out_unlock; 2663 } 2664 ida_init(&subsys->ns_ida); 2665 list_add_tail(&subsys->entry, &nvme_subsystems); 2666 } 2667 2668 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2669 dev_name(ctrl->device)); 2670 if (ret) { 2671 dev_err(ctrl->device, 2672 "failed to create sysfs link from subsystem.\n"); 2673 goto out_put_subsystem; 2674 } 2675 2676 if (!found) 2677 subsys->instance = ctrl->instance; 2678 ctrl->subsys = subsys; 2679 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2680 mutex_unlock(&nvme_subsystems_lock); 2681 return 0; 2682 2683 out_put_subsystem: 2684 nvme_put_subsystem(subsys); 2685 out_unlock: 2686 mutex_unlock(&nvme_subsystems_lock); 2687 return ret; 2688 } 2689 2690 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, 2691 void *log, size_t size, u64 offset) 2692 { 2693 struct nvme_command c = { }; 2694 unsigned long dwlen = size / 4 - 1; 2695 2696 c.get_log_page.opcode = nvme_admin_get_log_page; 2697 c.get_log_page.nsid = cpu_to_le32(nsid); 2698 c.get_log_page.lid = log_page; 2699 c.get_log_page.lsp = lsp; 2700 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2701 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2702 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2703 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2704 2705 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2706 } 2707 2708 static int nvme_get_effects_log(struct nvme_ctrl *ctrl) 2709 { 2710 int ret; 2711 2712 if (!ctrl->effects) 2713 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); 2714 2715 if (!ctrl->effects) 2716 return 0; 2717 2718 ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, 2719 ctrl->effects, sizeof(*ctrl->effects), 0); 2720 if (ret) { 2721 kfree(ctrl->effects); 2722 ctrl->effects = NULL; 2723 } 2724 return ret; 2725 } 2726 2727 /* 2728 * Initialize the cached copies of the Identify data and various controller 2729 * register in our nvme_ctrl structure. This should be called as soon as 2730 * the admin queue is fully up and running. 2731 */ 2732 int nvme_init_identify(struct nvme_ctrl *ctrl) 2733 { 2734 struct nvme_id_ctrl *id; 2735 int ret, page_shift; 2736 u32 max_hw_sectors; 2737 bool prev_apst_enabled; 2738 2739 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 2740 if (ret) { 2741 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 2742 return ret; 2743 } 2744 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2745 ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 2746 2747 if (ctrl->vs >= NVME_VS(1, 1, 0)) 2748 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 2749 2750 ret = nvme_identify_ctrl(ctrl, &id); 2751 if (ret) { 2752 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 2753 return -EIO; 2754 } 2755 2756 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 2757 ret = nvme_get_effects_log(ctrl); 2758 if (ret < 0) 2759 goto out_free; 2760 } 2761 2762 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 2763 ctrl->cntlid = le16_to_cpu(id->cntlid); 2764 2765 if (!ctrl->identified) { 2766 int i; 2767 2768 ret = nvme_init_subsystem(ctrl, id); 2769 if (ret) 2770 goto out_free; 2771 2772 /* 2773 * Check for quirks. Quirk can depend on firmware version, 2774 * so, in principle, the set of quirks present can change 2775 * across a reset. As a possible future enhancement, we 2776 * could re-scan for quirks every time we reinitialize 2777 * the device, but we'd have to make sure that the driver 2778 * behaves intelligently if the quirks change. 2779 */ 2780 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 2781 if (quirk_matches(id, &core_quirks[i])) 2782 ctrl->quirks |= core_quirks[i].quirks; 2783 } 2784 } 2785 2786 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 2787 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 2788 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 2789 } 2790 2791 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 2792 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 2793 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 2794 2795 ctrl->oacs = le16_to_cpu(id->oacs); 2796 ctrl->oncs = le16_to_cpu(id->oncs); 2797 ctrl->mtfa = le16_to_cpu(id->mtfa); 2798 ctrl->oaes = le32_to_cpu(id->oaes); 2799 ctrl->wctemp = le16_to_cpu(id->wctemp); 2800 ctrl->cctemp = le16_to_cpu(id->cctemp); 2801 2802 atomic_set(&ctrl->abort_limit, id->acl + 1); 2803 ctrl->vwc = id->vwc; 2804 if (id->mdts) 2805 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2806 else 2807 max_hw_sectors = UINT_MAX; 2808 ctrl->max_hw_sectors = 2809 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 2810 2811 nvme_set_queue_limits(ctrl, ctrl->admin_q); 2812 ctrl->sgls = le32_to_cpu(id->sgls); 2813 ctrl->kas = le16_to_cpu(id->kas); 2814 ctrl->max_namespaces = le32_to_cpu(id->mnan); 2815 ctrl->ctratt = le32_to_cpu(id->ctratt); 2816 2817 if (id->rtd3e) { 2818 /* us -> s */ 2819 u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000; 2820 2821 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 2822 shutdown_timeout, 60); 2823 2824 if (ctrl->shutdown_timeout != shutdown_timeout) 2825 dev_info(ctrl->device, 2826 "Shutdown timeout set to %u seconds\n", 2827 ctrl->shutdown_timeout); 2828 } else 2829 ctrl->shutdown_timeout = shutdown_timeout; 2830 2831 ctrl->npss = id->npss; 2832 ctrl->apsta = id->apsta; 2833 prev_apst_enabled = ctrl->apst_enabled; 2834 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 2835 if (force_apst && id->apsta) { 2836 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 2837 ctrl->apst_enabled = true; 2838 } else { 2839 ctrl->apst_enabled = false; 2840 } 2841 } else { 2842 ctrl->apst_enabled = id->apsta; 2843 } 2844 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 2845 2846 if (ctrl->ops->flags & NVME_F_FABRICS) { 2847 ctrl->icdoff = le16_to_cpu(id->icdoff); 2848 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 2849 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 2850 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 2851 2852 /* 2853 * In fabrics we need to verify the cntlid matches the 2854 * admin connect 2855 */ 2856 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 2857 dev_err(ctrl->device, 2858 "Mismatching cntlid: Connect %u vs Identify " 2859 "%u, rejecting\n", 2860 ctrl->cntlid, le16_to_cpu(id->cntlid)); 2861 ret = -EINVAL; 2862 goto out_free; 2863 } 2864 2865 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { 2866 dev_err(ctrl->device, 2867 "keep-alive support is mandatory for fabrics\n"); 2868 ret = -EINVAL; 2869 goto out_free; 2870 } 2871 } else { 2872 ctrl->hmpre = le32_to_cpu(id->hmpre); 2873 ctrl->hmmin = le32_to_cpu(id->hmmin); 2874 ctrl->hmminds = le32_to_cpu(id->hmminds); 2875 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 2876 } 2877 2878 ret = nvme_mpath_init(ctrl, id); 2879 kfree(id); 2880 2881 if (ret < 0) 2882 return ret; 2883 2884 if (ctrl->apst_enabled && !prev_apst_enabled) 2885 dev_pm_qos_expose_latency_tolerance(ctrl->device); 2886 else if (!ctrl->apst_enabled && prev_apst_enabled) 2887 dev_pm_qos_hide_latency_tolerance(ctrl->device); 2888 2889 ret = nvme_configure_apst(ctrl); 2890 if (ret < 0) 2891 return ret; 2892 2893 ret = nvme_configure_timestamp(ctrl); 2894 if (ret < 0) 2895 return ret; 2896 2897 ret = nvme_configure_directives(ctrl); 2898 if (ret < 0) 2899 return ret; 2900 2901 ret = nvme_configure_acre(ctrl); 2902 if (ret < 0) 2903 return ret; 2904 2905 if (!ctrl->identified) 2906 nvme_hwmon_init(ctrl); 2907 2908 ctrl->identified = true; 2909 2910 return 0; 2911 2912 out_free: 2913 kfree(id); 2914 return ret; 2915 } 2916 EXPORT_SYMBOL_GPL(nvme_init_identify); 2917 2918 static int nvme_dev_open(struct inode *inode, struct file *file) 2919 { 2920 struct nvme_ctrl *ctrl = 2921 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 2922 2923 switch (ctrl->state) { 2924 case NVME_CTRL_LIVE: 2925 break; 2926 default: 2927 return -EWOULDBLOCK; 2928 } 2929 2930 file->private_data = ctrl; 2931 return 0; 2932 } 2933 2934 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) 2935 { 2936 struct nvme_ns *ns; 2937 int ret; 2938 2939 down_read(&ctrl->namespaces_rwsem); 2940 if (list_empty(&ctrl->namespaces)) { 2941 ret = -ENOTTY; 2942 goto out_unlock; 2943 } 2944 2945 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); 2946 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { 2947 dev_warn(ctrl->device, 2948 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); 2949 ret = -EINVAL; 2950 goto out_unlock; 2951 } 2952 2953 dev_warn(ctrl->device, 2954 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); 2955 kref_get(&ns->kref); 2956 up_read(&ctrl->namespaces_rwsem); 2957 2958 ret = nvme_user_cmd(ctrl, ns, argp); 2959 nvme_put_ns(ns); 2960 return ret; 2961 2962 out_unlock: 2963 up_read(&ctrl->namespaces_rwsem); 2964 return ret; 2965 } 2966 2967 static long nvme_dev_ioctl(struct file *file, unsigned int cmd, 2968 unsigned long arg) 2969 { 2970 struct nvme_ctrl *ctrl = file->private_data; 2971 void __user *argp = (void __user *)arg; 2972 2973 switch (cmd) { 2974 case NVME_IOCTL_ADMIN_CMD: 2975 return nvme_user_cmd(ctrl, NULL, argp); 2976 case NVME_IOCTL_ADMIN64_CMD: 2977 return nvme_user_cmd64(ctrl, NULL, argp); 2978 case NVME_IOCTL_IO_CMD: 2979 return nvme_dev_user_cmd(ctrl, argp); 2980 case NVME_IOCTL_RESET: 2981 dev_warn(ctrl->device, "resetting controller\n"); 2982 return nvme_reset_ctrl_sync(ctrl); 2983 case NVME_IOCTL_SUBSYS_RESET: 2984 return nvme_reset_subsystem(ctrl); 2985 case NVME_IOCTL_RESCAN: 2986 nvme_queue_scan(ctrl); 2987 return 0; 2988 default: 2989 return -ENOTTY; 2990 } 2991 } 2992 2993 static const struct file_operations nvme_dev_fops = { 2994 .owner = THIS_MODULE, 2995 .open = nvme_dev_open, 2996 .unlocked_ioctl = nvme_dev_ioctl, 2997 .compat_ioctl = compat_ptr_ioctl, 2998 }; 2999 3000 static ssize_t nvme_sysfs_reset(struct device *dev, 3001 struct device_attribute *attr, const char *buf, 3002 size_t count) 3003 { 3004 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3005 int ret; 3006 3007 ret = nvme_reset_ctrl_sync(ctrl); 3008 if (ret < 0) 3009 return ret; 3010 return count; 3011 } 3012 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 3013 3014 static ssize_t nvme_sysfs_rescan(struct device *dev, 3015 struct device_attribute *attr, const char *buf, 3016 size_t count) 3017 { 3018 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3019 3020 nvme_queue_scan(ctrl); 3021 return count; 3022 } 3023 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 3024 3025 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 3026 { 3027 struct gendisk *disk = dev_to_disk(dev); 3028 3029 if (disk->fops == &nvme_fops) 3030 return nvme_get_ns_from_dev(dev)->head; 3031 else 3032 return disk->private_data; 3033 } 3034 3035 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 3036 char *buf) 3037 { 3038 struct nvme_ns_head *head = dev_to_ns_head(dev); 3039 struct nvme_ns_ids *ids = &head->ids; 3040 struct nvme_subsystem *subsys = head->subsys; 3041 int serial_len = sizeof(subsys->serial); 3042 int model_len = sizeof(subsys->model); 3043 3044 if (!uuid_is_null(&ids->uuid)) 3045 return sprintf(buf, "uuid.%pU\n", &ids->uuid); 3046 3047 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3048 return sprintf(buf, "eui.%16phN\n", ids->nguid); 3049 3050 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3051 return sprintf(buf, "eui.%8phN\n", ids->eui64); 3052 3053 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 3054 subsys->serial[serial_len - 1] == '\0')) 3055 serial_len--; 3056 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 3057 subsys->model[model_len - 1] == '\0')) 3058 model_len--; 3059 3060 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 3061 serial_len, subsys->serial, model_len, subsys->model, 3062 head->ns_id); 3063 } 3064 static DEVICE_ATTR_RO(wwid); 3065 3066 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 3067 char *buf) 3068 { 3069 return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 3070 } 3071 static DEVICE_ATTR_RO(nguid); 3072 3073 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 3074 char *buf) 3075 { 3076 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3077 3078 /* For backward compatibility expose the NGUID to userspace if 3079 * we have no UUID set 3080 */ 3081 if (uuid_is_null(&ids->uuid)) { 3082 printk_ratelimited(KERN_WARNING 3083 "No UUID available providing old NGUID\n"); 3084 return sprintf(buf, "%pU\n", ids->nguid); 3085 } 3086 return sprintf(buf, "%pU\n", &ids->uuid); 3087 } 3088 static DEVICE_ATTR_RO(uuid); 3089 3090 static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 3091 char *buf) 3092 { 3093 return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 3094 } 3095 static DEVICE_ATTR_RO(eui); 3096 3097 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 3098 char *buf) 3099 { 3100 return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id); 3101 } 3102 static DEVICE_ATTR_RO(nsid); 3103 3104 static struct attribute *nvme_ns_id_attrs[] = { 3105 &dev_attr_wwid.attr, 3106 &dev_attr_uuid.attr, 3107 &dev_attr_nguid.attr, 3108 &dev_attr_eui.attr, 3109 &dev_attr_nsid.attr, 3110 #ifdef CONFIG_NVME_MULTIPATH 3111 &dev_attr_ana_grpid.attr, 3112 &dev_attr_ana_state.attr, 3113 #endif 3114 NULL, 3115 }; 3116 3117 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 3118 struct attribute *a, int n) 3119 { 3120 struct device *dev = container_of(kobj, struct device, kobj); 3121 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3122 3123 if (a == &dev_attr_uuid.attr) { 3124 if (uuid_is_null(&ids->uuid) && 3125 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3126 return 0; 3127 } 3128 if (a == &dev_attr_nguid.attr) { 3129 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3130 return 0; 3131 } 3132 if (a == &dev_attr_eui.attr) { 3133 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3134 return 0; 3135 } 3136 #ifdef CONFIG_NVME_MULTIPATH 3137 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { 3138 if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */ 3139 return 0; 3140 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) 3141 return 0; 3142 } 3143 #endif 3144 return a->mode; 3145 } 3146 3147 static const struct attribute_group nvme_ns_id_attr_group = { 3148 .attrs = nvme_ns_id_attrs, 3149 .is_visible = nvme_ns_id_attrs_are_visible, 3150 }; 3151 3152 const struct attribute_group *nvme_ns_id_attr_groups[] = { 3153 &nvme_ns_id_attr_group, 3154 #ifdef CONFIG_NVM 3155 &nvme_nvm_attr_group, 3156 #endif 3157 NULL, 3158 }; 3159 3160 #define nvme_show_str_function(field) \ 3161 static ssize_t field##_show(struct device *dev, \ 3162 struct device_attribute *attr, char *buf) \ 3163 { \ 3164 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3165 return sprintf(buf, "%.*s\n", \ 3166 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 3167 } \ 3168 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3169 3170 nvme_show_str_function(model); 3171 nvme_show_str_function(serial); 3172 nvme_show_str_function(firmware_rev); 3173 3174 #define nvme_show_int_function(field) \ 3175 static ssize_t field##_show(struct device *dev, \ 3176 struct device_attribute *attr, char *buf) \ 3177 { \ 3178 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3179 return sprintf(buf, "%d\n", ctrl->field); \ 3180 } \ 3181 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3182 3183 nvme_show_int_function(cntlid); 3184 nvme_show_int_function(numa_node); 3185 nvme_show_int_function(queue_count); 3186 nvme_show_int_function(sqsize); 3187 3188 static ssize_t nvme_sysfs_delete(struct device *dev, 3189 struct device_attribute *attr, const char *buf, 3190 size_t count) 3191 { 3192 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3193 3194 if (device_remove_file_self(dev, attr)) 3195 nvme_delete_ctrl_sync(ctrl); 3196 return count; 3197 } 3198 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 3199 3200 static ssize_t nvme_sysfs_show_transport(struct device *dev, 3201 struct device_attribute *attr, 3202 char *buf) 3203 { 3204 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3205 3206 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); 3207 } 3208 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 3209 3210 static ssize_t nvme_sysfs_show_state(struct device *dev, 3211 struct device_attribute *attr, 3212 char *buf) 3213 { 3214 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3215 static const char *const state_name[] = { 3216 [NVME_CTRL_NEW] = "new", 3217 [NVME_CTRL_LIVE] = "live", 3218 [NVME_CTRL_RESETTING] = "resetting", 3219 [NVME_CTRL_CONNECTING] = "connecting", 3220 [NVME_CTRL_DELETING] = "deleting", 3221 [NVME_CTRL_DEAD] = "dead", 3222 }; 3223 3224 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 3225 state_name[ctrl->state]) 3226 return sprintf(buf, "%s\n", state_name[ctrl->state]); 3227 3228 return sprintf(buf, "unknown state\n"); 3229 } 3230 3231 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 3232 3233 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 3234 struct device_attribute *attr, 3235 char *buf) 3236 { 3237 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3238 3239 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); 3240 } 3241 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 3242 3243 static ssize_t nvme_sysfs_show_address(struct device *dev, 3244 struct device_attribute *attr, 3245 char *buf) 3246 { 3247 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3248 3249 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 3250 } 3251 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 3252 3253 static struct attribute *nvme_dev_attrs[] = { 3254 &dev_attr_reset_controller.attr, 3255 &dev_attr_rescan_controller.attr, 3256 &dev_attr_model.attr, 3257 &dev_attr_serial.attr, 3258 &dev_attr_firmware_rev.attr, 3259 &dev_attr_cntlid.attr, 3260 &dev_attr_delete_controller.attr, 3261 &dev_attr_transport.attr, 3262 &dev_attr_subsysnqn.attr, 3263 &dev_attr_address.attr, 3264 &dev_attr_state.attr, 3265 &dev_attr_numa_node.attr, 3266 &dev_attr_queue_count.attr, 3267 &dev_attr_sqsize.attr, 3268 NULL 3269 }; 3270 3271 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 3272 struct attribute *a, int n) 3273 { 3274 struct device *dev = container_of(kobj, struct device, kobj); 3275 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3276 3277 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 3278 return 0; 3279 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 3280 return 0; 3281 3282 return a->mode; 3283 } 3284 3285 static struct attribute_group nvme_dev_attrs_group = { 3286 .attrs = nvme_dev_attrs, 3287 .is_visible = nvme_dev_attrs_are_visible, 3288 }; 3289 3290 static const struct attribute_group *nvme_dev_attr_groups[] = { 3291 &nvme_dev_attrs_group, 3292 NULL, 3293 }; 3294 3295 static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys, 3296 unsigned nsid) 3297 { 3298 struct nvme_ns_head *h; 3299 3300 lockdep_assert_held(&subsys->lock); 3301 3302 list_for_each_entry(h, &subsys->nsheads, entry) { 3303 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref)) 3304 return h; 3305 } 3306 3307 return NULL; 3308 } 3309 3310 static int __nvme_check_ids(struct nvme_subsystem *subsys, 3311 struct nvme_ns_head *new) 3312 { 3313 struct nvme_ns_head *h; 3314 3315 lockdep_assert_held(&subsys->lock); 3316 3317 list_for_each_entry(h, &subsys->nsheads, entry) { 3318 if (nvme_ns_ids_valid(&new->ids) && 3319 !list_empty(&h->list) && 3320 nvme_ns_ids_equal(&new->ids, &h->ids)) 3321 return -EINVAL; 3322 } 3323 3324 return 0; 3325 } 3326 3327 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 3328 unsigned nsid, struct nvme_id_ns *id) 3329 { 3330 struct nvme_ns_head *head; 3331 size_t size = sizeof(*head); 3332 int ret = -ENOMEM; 3333 3334 #ifdef CONFIG_NVME_MULTIPATH 3335 size += num_possible_nodes() * sizeof(struct nvme_ns *); 3336 #endif 3337 3338 head = kzalloc(size, GFP_KERNEL); 3339 if (!head) 3340 goto out; 3341 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); 3342 if (ret < 0) 3343 goto out_free_head; 3344 head->instance = ret; 3345 INIT_LIST_HEAD(&head->list); 3346 ret = init_srcu_struct(&head->srcu); 3347 if (ret) 3348 goto out_ida_remove; 3349 head->subsys = ctrl->subsys; 3350 head->ns_id = nsid; 3351 kref_init(&head->ref); 3352 3353 ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids); 3354 if (ret) 3355 goto out_cleanup_srcu; 3356 3357 ret = __nvme_check_ids(ctrl->subsys, head); 3358 if (ret) { 3359 dev_err(ctrl->device, 3360 "duplicate IDs for nsid %d\n", nsid); 3361 goto out_cleanup_srcu; 3362 } 3363 3364 ret = nvme_mpath_alloc_disk(ctrl, head); 3365 if (ret) 3366 goto out_cleanup_srcu; 3367 3368 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 3369 3370 kref_get(&ctrl->subsys->ref); 3371 3372 return head; 3373 out_cleanup_srcu: 3374 cleanup_srcu_struct(&head->srcu); 3375 out_ida_remove: 3376 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); 3377 out_free_head: 3378 kfree(head); 3379 out: 3380 if (ret > 0) 3381 ret = blk_status_to_errno(nvme_error_status(ret)); 3382 return ERR_PTR(ret); 3383 } 3384 3385 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, 3386 struct nvme_id_ns *id) 3387 { 3388 struct nvme_ctrl *ctrl = ns->ctrl; 3389 bool is_shared = id->nmic & (1 << 0); 3390 struct nvme_ns_head *head = NULL; 3391 int ret = 0; 3392 3393 mutex_lock(&ctrl->subsys->lock); 3394 if (is_shared) 3395 head = __nvme_find_ns_head(ctrl->subsys, nsid); 3396 if (!head) { 3397 head = nvme_alloc_ns_head(ctrl, nsid, id); 3398 if (IS_ERR(head)) { 3399 ret = PTR_ERR(head); 3400 goto out_unlock; 3401 } 3402 } else { 3403 struct nvme_ns_ids ids; 3404 3405 ret = nvme_report_ns_ids(ctrl, nsid, id, &ids); 3406 if (ret) 3407 goto out_unlock; 3408 3409 if (!nvme_ns_ids_equal(&head->ids, &ids)) { 3410 dev_err(ctrl->device, 3411 "IDs don't match for shared namespace %d\n", 3412 nsid); 3413 ret = -EINVAL; 3414 goto out_unlock; 3415 } 3416 } 3417 3418 list_add_tail(&ns->siblings, &head->list); 3419 ns->head = head; 3420 3421 out_unlock: 3422 mutex_unlock(&ctrl->subsys->lock); 3423 if (ret > 0) 3424 ret = blk_status_to_errno(nvme_error_status(ret)); 3425 return ret; 3426 } 3427 3428 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) 3429 { 3430 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); 3431 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); 3432 3433 return nsa->head->ns_id - nsb->head->ns_id; 3434 } 3435 3436 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3437 { 3438 struct nvme_ns *ns, *ret = NULL; 3439 3440 down_read(&ctrl->namespaces_rwsem); 3441 list_for_each_entry(ns, &ctrl->namespaces, list) { 3442 if (ns->head->ns_id == nsid) { 3443 if (!kref_get_unless_zero(&ns->kref)) 3444 continue; 3445 ret = ns; 3446 break; 3447 } 3448 if (ns->head->ns_id > nsid) 3449 break; 3450 } 3451 up_read(&ctrl->namespaces_rwsem); 3452 return ret; 3453 } 3454 3455 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns) 3456 { 3457 struct streams_directive_params s; 3458 int ret; 3459 3460 if (!ctrl->nr_streams) 3461 return 0; 3462 3463 ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); 3464 if (ret) 3465 return ret; 3466 3467 ns->sws = le32_to_cpu(s.sws); 3468 ns->sgs = le16_to_cpu(s.sgs); 3469 3470 if (ns->sws) { 3471 unsigned int bs = 1 << ns->lba_shift; 3472 3473 blk_queue_io_min(ns->queue, bs * ns->sws); 3474 if (ns->sgs) 3475 blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs); 3476 } 3477 3478 return 0; 3479 } 3480 3481 static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3482 { 3483 struct nvme_ns *ns; 3484 struct gendisk *disk; 3485 struct nvme_id_ns *id; 3486 char disk_name[DISK_NAME_LEN]; 3487 int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret; 3488 3489 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3490 if (!ns) 3491 return -ENOMEM; 3492 3493 ns->queue = blk_mq_init_queue(ctrl->tagset); 3494 if (IS_ERR(ns->queue)) { 3495 ret = PTR_ERR(ns->queue); 3496 goto out_free_ns; 3497 } 3498 3499 if (ctrl->opts && ctrl->opts->data_digest) 3500 ns->queue->backing_dev_info->capabilities 3501 |= BDI_CAP_STABLE_WRITES; 3502 3503 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 3504 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA) 3505 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 3506 3507 ns->queue->queuedata = ns; 3508 ns->ctrl = ctrl; 3509 3510 kref_init(&ns->kref); 3511 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ 3512 3513 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 3514 nvme_set_queue_limits(ctrl, ns->queue); 3515 3516 ret = nvme_identify_ns(ctrl, nsid, &id); 3517 if (ret) 3518 goto out_free_queue; 3519 3520 if (id->ncap == 0) { 3521 ret = -EINVAL; 3522 goto out_free_id; 3523 } 3524 3525 ret = nvme_init_ns_head(ns, nsid, id); 3526 if (ret) 3527 goto out_free_id; 3528 nvme_setup_streams_ns(ctrl, ns); 3529 nvme_set_disk_name(disk_name, ns, ctrl, &flags); 3530 3531 disk = alloc_disk_node(0, node); 3532 if (!disk) { 3533 ret = -ENOMEM; 3534 goto out_unlink_ns; 3535 } 3536 3537 disk->fops = &nvme_fops; 3538 disk->private_data = ns; 3539 disk->queue = ns->queue; 3540 disk->flags = flags; 3541 memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); 3542 ns->disk = disk; 3543 3544 __nvme_revalidate_disk(disk, id); 3545 3546 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { 3547 ret = nvme_nvm_register(ns, disk_name, node); 3548 if (ret) { 3549 dev_warn(ctrl->device, "LightNVM init failure\n"); 3550 goto out_put_disk; 3551 } 3552 } 3553 3554 down_write(&ctrl->namespaces_rwsem); 3555 list_add_tail(&ns->list, &ctrl->namespaces); 3556 up_write(&ctrl->namespaces_rwsem); 3557 3558 nvme_get_ctrl(ctrl); 3559 3560 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups); 3561 3562 nvme_mpath_add_disk(ns, id); 3563 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 3564 kfree(id); 3565 3566 return 0; 3567 out_put_disk: 3568 put_disk(ns->disk); 3569 out_unlink_ns: 3570 mutex_lock(&ctrl->subsys->lock); 3571 list_del_rcu(&ns->siblings); 3572 mutex_unlock(&ctrl->subsys->lock); 3573 nvme_put_ns_head(ns->head); 3574 out_free_id: 3575 kfree(id); 3576 out_free_queue: 3577 blk_cleanup_queue(ns->queue); 3578 out_free_ns: 3579 kfree(ns); 3580 if (ret > 0) 3581 ret = blk_status_to_errno(nvme_error_status(ret)); 3582 return ret; 3583 } 3584 3585 static void nvme_ns_remove(struct nvme_ns *ns) 3586 { 3587 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 3588 return; 3589 3590 nvme_fault_inject_fini(&ns->fault_inject); 3591 3592 mutex_lock(&ns->ctrl->subsys->lock); 3593 list_del_rcu(&ns->siblings); 3594 mutex_unlock(&ns->ctrl->subsys->lock); 3595 synchronize_rcu(); /* guarantee not available in head->list */ 3596 nvme_mpath_clear_current_path(ns); 3597 synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ 3598 3599 if (ns->disk && ns->disk->flags & GENHD_FL_UP) { 3600 del_gendisk(ns->disk); 3601 blk_cleanup_queue(ns->queue); 3602 if (blk_get_integrity(ns->disk)) 3603 blk_integrity_unregister(ns->disk); 3604 } 3605 3606 down_write(&ns->ctrl->namespaces_rwsem); 3607 list_del_init(&ns->list); 3608 up_write(&ns->ctrl->namespaces_rwsem); 3609 3610 nvme_mpath_check_last_path(ns); 3611 nvme_put_ns(ns); 3612 } 3613 3614 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3615 { 3616 struct nvme_ns *ns; 3617 3618 ns = nvme_find_get_ns(ctrl, nsid); 3619 if (ns) { 3620 if (ns->disk && revalidate_disk(ns->disk)) 3621 nvme_ns_remove(ns); 3622 nvme_put_ns(ns); 3623 } else 3624 nvme_alloc_ns(ctrl, nsid); 3625 } 3626 3627 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3628 unsigned nsid) 3629 { 3630 struct nvme_ns *ns, *next; 3631 LIST_HEAD(rm_list); 3632 3633 down_write(&ctrl->namespaces_rwsem); 3634 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3635 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) 3636 list_move_tail(&ns->list, &rm_list); 3637 } 3638 up_write(&ctrl->namespaces_rwsem); 3639 3640 list_for_each_entry_safe(ns, next, &rm_list, list) 3641 nvme_ns_remove(ns); 3642 3643 } 3644 3645 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) 3646 { 3647 struct nvme_ns *ns; 3648 __le32 *ns_list; 3649 unsigned i, j, nsid, prev = 0; 3650 unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024); 3651 int ret = 0; 3652 3653 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 3654 if (!ns_list) 3655 return -ENOMEM; 3656 3657 for (i = 0; i < num_lists; i++) { 3658 ret = nvme_identify_ns_list(ctrl, prev, ns_list); 3659 if (ret) 3660 goto free; 3661 3662 for (j = 0; j < min(nn, 1024U); j++) { 3663 nsid = le32_to_cpu(ns_list[j]); 3664 if (!nsid) 3665 goto out; 3666 3667 nvme_validate_ns(ctrl, nsid); 3668 3669 while (++prev < nsid) { 3670 ns = nvme_find_get_ns(ctrl, prev); 3671 if (ns) { 3672 nvme_ns_remove(ns); 3673 nvme_put_ns(ns); 3674 } 3675 } 3676 } 3677 nn -= j; 3678 } 3679 out: 3680 nvme_remove_invalid_namespaces(ctrl, prev); 3681 free: 3682 kfree(ns_list); 3683 return ret; 3684 } 3685 3686 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) 3687 { 3688 unsigned i; 3689 3690 for (i = 1; i <= nn; i++) 3691 nvme_validate_ns(ctrl, i); 3692 3693 nvme_remove_invalid_namespaces(ctrl, nn); 3694 } 3695 3696 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 3697 { 3698 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 3699 __le32 *log; 3700 int error; 3701 3702 log = kzalloc(log_size, GFP_KERNEL); 3703 if (!log) 3704 return; 3705 3706 /* 3707 * We need to read the log to clear the AEN, but we don't want to rely 3708 * on it for the changed namespace information as userspace could have 3709 * raced with us in reading the log page, which could cause us to miss 3710 * updates. 3711 */ 3712 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log, 3713 log_size, 0); 3714 if (error) 3715 dev_warn(ctrl->device, 3716 "reading changed ns log failed: %d\n", error); 3717 3718 kfree(log); 3719 } 3720 3721 static void nvme_scan_work(struct work_struct *work) 3722 { 3723 struct nvme_ctrl *ctrl = 3724 container_of(work, struct nvme_ctrl, scan_work); 3725 struct nvme_id_ctrl *id; 3726 unsigned nn; 3727 3728 /* No tagset on a live ctrl means IO queues could not created */ 3729 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) 3730 return; 3731 3732 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 3733 dev_info(ctrl->device, "rescanning namespaces.\n"); 3734 nvme_clear_changed_ns_log(ctrl); 3735 } 3736 3737 if (nvme_identify_ctrl(ctrl, &id)) 3738 return; 3739 3740 mutex_lock(&ctrl->scan_lock); 3741 nn = le32_to_cpu(id->nn); 3742 if (ctrl->vs >= NVME_VS(1, 1, 0) && 3743 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { 3744 if (!nvme_scan_ns_list(ctrl, nn)) 3745 goto out_free_id; 3746 } 3747 nvme_scan_ns_sequential(ctrl, nn); 3748 out_free_id: 3749 mutex_unlock(&ctrl->scan_lock); 3750 kfree(id); 3751 down_write(&ctrl->namespaces_rwsem); 3752 list_sort(NULL, &ctrl->namespaces, ns_cmp); 3753 up_write(&ctrl->namespaces_rwsem); 3754 } 3755 3756 /* 3757 * This function iterates the namespace list unlocked to allow recovery from 3758 * controller failure. It is up to the caller to ensure the namespace list is 3759 * not modified by scan work while this function is executing. 3760 */ 3761 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 3762 { 3763 struct nvme_ns *ns, *next; 3764 LIST_HEAD(ns_list); 3765 3766 /* 3767 * make sure to requeue I/O to all namespaces as these 3768 * might result from the scan itself and must complete 3769 * for the scan_work to make progress 3770 */ 3771 nvme_mpath_clear_ctrl_paths(ctrl); 3772 3773 /* prevent racing with ns scanning */ 3774 flush_work(&ctrl->scan_work); 3775 3776 /* 3777 * The dead states indicates the controller was not gracefully 3778 * disconnected. In that case, we won't be able to flush any data while 3779 * removing the namespaces' disks; fail all the queues now to avoid 3780 * potentially having to clean up the failed sync later. 3781 */ 3782 if (ctrl->state == NVME_CTRL_DEAD) 3783 nvme_kill_queues(ctrl); 3784 3785 down_write(&ctrl->namespaces_rwsem); 3786 list_splice_init(&ctrl->namespaces, &ns_list); 3787 up_write(&ctrl->namespaces_rwsem); 3788 3789 list_for_each_entry_safe(ns, next, &ns_list, list) 3790 nvme_ns_remove(ns); 3791 } 3792 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 3793 3794 static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env) 3795 { 3796 struct nvme_ctrl *ctrl = 3797 container_of(dev, struct nvme_ctrl, ctrl_device); 3798 struct nvmf_ctrl_options *opts = ctrl->opts; 3799 int ret; 3800 3801 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); 3802 if (ret) 3803 return ret; 3804 3805 if (opts) { 3806 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); 3807 if (ret) 3808 return ret; 3809 3810 ret = add_uevent_var(env, "NVME_TRSVCID=%s", 3811 opts->trsvcid ?: "none"); 3812 if (ret) 3813 return ret; 3814 3815 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", 3816 opts->host_traddr ?: "none"); 3817 } 3818 return ret; 3819 } 3820 3821 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 3822 { 3823 char *envp[2] = { NULL, NULL }; 3824 u32 aen_result = ctrl->aen_result; 3825 3826 ctrl->aen_result = 0; 3827 if (!aen_result) 3828 return; 3829 3830 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 3831 if (!envp[0]) 3832 return; 3833 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 3834 kfree(envp[0]); 3835 } 3836 3837 static void nvme_async_event_work(struct work_struct *work) 3838 { 3839 struct nvme_ctrl *ctrl = 3840 container_of(work, struct nvme_ctrl, async_event_work); 3841 3842 nvme_aen_uevent(ctrl); 3843 ctrl->ops->submit_async_event(ctrl); 3844 } 3845 3846 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 3847 { 3848 3849 u32 csts; 3850 3851 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 3852 return false; 3853 3854 if (csts == ~0) 3855 return false; 3856 3857 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 3858 } 3859 3860 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 3861 { 3862 struct nvme_fw_slot_info_log *log; 3863 3864 log = kmalloc(sizeof(*log), GFP_KERNEL); 3865 if (!log) 3866 return; 3867 3868 if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log, 3869 sizeof(*log), 0)) 3870 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 3871 kfree(log); 3872 } 3873 3874 static void nvme_fw_act_work(struct work_struct *work) 3875 { 3876 struct nvme_ctrl *ctrl = container_of(work, 3877 struct nvme_ctrl, fw_act_work); 3878 unsigned long fw_act_timeout; 3879 3880 if (ctrl->mtfa) 3881 fw_act_timeout = jiffies + 3882 msecs_to_jiffies(ctrl->mtfa * 100); 3883 else 3884 fw_act_timeout = jiffies + 3885 msecs_to_jiffies(admin_timeout * 1000); 3886 3887 nvme_stop_queues(ctrl); 3888 while (nvme_ctrl_pp_status(ctrl)) { 3889 if (time_after(jiffies, fw_act_timeout)) { 3890 dev_warn(ctrl->device, 3891 "Fw activation timeout, reset controller\n"); 3892 nvme_try_sched_reset(ctrl); 3893 return; 3894 } 3895 msleep(100); 3896 } 3897 3898 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) 3899 return; 3900 3901 nvme_start_queues(ctrl); 3902 /* read FW slot information to clear the AER */ 3903 nvme_get_fw_slot_info(ctrl); 3904 } 3905 3906 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 3907 { 3908 u32 aer_notice_type = (result & 0xff00) >> 8; 3909 3910 trace_nvme_async_event(ctrl, aer_notice_type); 3911 3912 switch (aer_notice_type) { 3913 case NVME_AER_NOTICE_NS_CHANGED: 3914 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 3915 nvme_queue_scan(ctrl); 3916 break; 3917 case NVME_AER_NOTICE_FW_ACT_STARTING: 3918 /* 3919 * We are (ab)using the RESETTING state to prevent subsequent 3920 * recovery actions from interfering with the controller's 3921 * firmware activation. 3922 */ 3923 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 3924 queue_work(nvme_wq, &ctrl->fw_act_work); 3925 break; 3926 #ifdef CONFIG_NVME_MULTIPATH 3927 case NVME_AER_NOTICE_ANA: 3928 if (!ctrl->ana_log_buf) 3929 break; 3930 queue_work(nvme_wq, &ctrl->ana_work); 3931 break; 3932 #endif 3933 case NVME_AER_NOTICE_DISC_CHANGED: 3934 ctrl->aen_result = result; 3935 break; 3936 default: 3937 dev_warn(ctrl->device, "async event result %08x\n", result); 3938 } 3939 } 3940 3941 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 3942 volatile union nvme_result *res) 3943 { 3944 u32 result = le32_to_cpu(res->u32); 3945 u32 aer_type = result & 0x07; 3946 3947 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 3948 return; 3949 3950 switch (aer_type) { 3951 case NVME_AER_NOTICE: 3952 nvme_handle_aen_notice(ctrl, result); 3953 break; 3954 case NVME_AER_ERROR: 3955 case NVME_AER_SMART: 3956 case NVME_AER_CSS: 3957 case NVME_AER_VS: 3958 trace_nvme_async_event(ctrl, aer_type); 3959 ctrl->aen_result = result; 3960 break; 3961 default: 3962 break; 3963 } 3964 queue_work(nvme_wq, &ctrl->async_event_work); 3965 } 3966 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 3967 3968 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 3969 { 3970 nvme_mpath_stop(ctrl); 3971 nvme_stop_keep_alive(ctrl); 3972 flush_work(&ctrl->async_event_work); 3973 cancel_work_sync(&ctrl->fw_act_work); 3974 } 3975 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 3976 3977 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 3978 { 3979 if (ctrl->kato) 3980 nvme_start_keep_alive(ctrl); 3981 3982 nvme_enable_aen(ctrl); 3983 3984 if (ctrl->queue_count > 1) { 3985 nvme_queue_scan(ctrl); 3986 nvme_start_queues(ctrl); 3987 } 3988 } 3989 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 3990 3991 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 3992 { 3993 nvme_fault_inject_fini(&ctrl->fault_inject); 3994 dev_pm_qos_hide_latency_tolerance(ctrl->device); 3995 cdev_device_del(&ctrl->cdev, ctrl->device); 3996 } 3997 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 3998 3999 static void nvme_free_ctrl(struct device *dev) 4000 { 4001 struct nvme_ctrl *ctrl = 4002 container_of(dev, struct nvme_ctrl, ctrl_device); 4003 struct nvme_subsystem *subsys = ctrl->subsys; 4004 4005 if (subsys && ctrl->instance != subsys->instance) 4006 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 4007 4008 kfree(ctrl->effects); 4009 nvme_mpath_uninit(ctrl); 4010 __free_page(ctrl->discard_page); 4011 4012 if (subsys) { 4013 mutex_lock(&nvme_subsystems_lock); 4014 list_del(&ctrl->subsys_entry); 4015 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 4016 mutex_unlock(&nvme_subsystems_lock); 4017 } 4018 4019 ctrl->ops->free_ctrl(ctrl); 4020 4021 if (subsys) 4022 nvme_put_subsystem(subsys); 4023 } 4024 4025 /* 4026 * Initialize a NVMe controller structures. This needs to be called during 4027 * earliest initialization so that we have the initialized structured around 4028 * during probing. 4029 */ 4030 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 4031 const struct nvme_ctrl_ops *ops, unsigned long quirks) 4032 { 4033 int ret; 4034 4035 ctrl->state = NVME_CTRL_NEW; 4036 spin_lock_init(&ctrl->lock); 4037 mutex_init(&ctrl->scan_lock); 4038 INIT_LIST_HEAD(&ctrl->namespaces); 4039 init_rwsem(&ctrl->namespaces_rwsem); 4040 ctrl->dev = dev; 4041 ctrl->ops = ops; 4042 ctrl->quirks = quirks; 4043 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 4044 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 4045 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 4046 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 4047 init_waitqueue_head(&ctrl->state_wq); 4048 4049 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 4050 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 4051 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 4052 4053 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > 4054 PAGE_SIZE); 4055 ctrl->discard_page = alloc_page(GFP_KERNEL); 4056 if (!ctrl->discard_page) { 4057 ret = -ENOMEM; 4058 goto out; 4059 } 4060 4061 ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); 4062 if (ret < 0) 4063 goto out; 4064 ctrl->instance = ret; 4065 4066 device_initialize(&ctrl->ctrl_device); 4067 ctrl->device = &ctrl->ctrl_device; 4068 ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance); 4069 ctrl->device->class = nvme_class; 4070 ctrl->device->parent = ctrl->dev; 4071 ctrl->device->groups = nvme_dev_attr_groups; 4072 ctrl->device->release = nvme_free_ctrl; 4073 dev_set_drvdata(ctrl->device, ctrl); 4074 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 4075 if (ret) 4076 goto out_release_instance; 4077 4078 cdev_init(&ctrl->cdev, &nvme_dev_fops); 4079 ctrl->cdev.owner = ops->module; 4080 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 4081 if (ret) 4082 goto out_free_name; 4083 4084 /* 4085 * Initialize latency tolerance controls. The sysfs files won't 4086 * be visible to userspace unless the device actually supports APST. 4087 */ 4088 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 4089 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 4090 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 4091 4092 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); 4093 4094 return 0; 4095 out_free_name: 4096 kfree_const(ctrl->device->kobj.name); 4097 out_release_instance: 4098 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 4099 out: 4100 if (ctrl->discard_page) 4101 __free_page(ctrl->discard_page); 4102 return ret; 4103 } 4104 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 4105 4106 /** 4107 * nvme_kill_queues(): Ends all namespace queues 4108 * @ctrl: the dead controller that needs to end 4109 * 4110 * Call this function when the driver determines it is unable to get the 4111 * controller in a state capable of servicing IO. 4112 */ 4113 void nvme_kill_queues(struct nvme_ctrl *ctrl) 4114 { 4115 struct nvme_ns *ns; 4116 4117 down_read(&ctrl->namespaces_rwsem); 4118 4119 /* Forcibly unquiesce queues to avoid blocking dispatch */ 4120 if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q)) 4121 blk_mq_unquiesce_queue(ctrl->admin_q); 4122 4123 list_for_each_entry(ns, &ctrl->namespaces, list) 4124 nvme_set_queue_dying(ns); 4125 4126 up_read(&ctrl->namespaces_rwsem); 4127 } 4128 EXPORT_SYMBOL_GPL(nvme_kill_queues); 4129 4130 void nvme_unfreeze(struct nvme_ctrl *ctrl) 4131 { 4132 struct nvme_ns *ns; 4133 4134 down_read(&ctrl->namespaces_rwsem); 4135 list_for_each_entry(ns, &ctrl->namespaces, list) 4136 blk_mq_unfreeze_queue(ns->queue); 4137 up_read(&ctrl->namespaces_rwsem); 4138 } 4139 EXPORT_SYMBOL_GPL(nvme_unfreeze); 4140 4141 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 4142 { 4143 struct nvme_ns *ns; 4144 4145 down_read(&ctrl->namespaces_rwsem); 4146 list_for_each_entry(ns, &ctrl->namespaces, list) { 4147 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 4148 if (timeout <= 0) 4149 break; 4150 } 4151 up_read(&ctrl->namespaces_rwsem); 4152 } 4153 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 4154 4155 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 4156 { 4157 struct nvme_ns *ns; 4158 4159 down_read(&ctrl->namespaces_rwsem); 4160 list_for_each_entry(ns, &ctrl->namespaces, list) 4161 blk_mq_freeze_queue_wait(ns->queue); 4162 up_read(&ctrl->namespaces_rwsem); 4163 } 4164 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 4165 4166 void nvme_start_freeze(struct nvme_ctrl *ctrl) 4167 { 4168 struct nvme_ns *ns; 4169 4170 down_read(&ctrl->namespaces_rwsem); 4171 list_for_each_entry(ns, &ctrl->namespaces, list) 4172 blk_freeze_queue_start(ns->queue); 4173 up_read(&ctrl->namespaces_rwsem); 4174 } 4175 EXPORT_SYMBOL_GPL(nvme_start_freeze); 4176 4177 void nvme_stop_queues(struct nvme_ctrl *ctrl) 4178 { 4179 struct nvme_ns *ns; 4180 4181 down_read(&ctrl->namespaces_rwsem); 4182 list_for_each_entry(ns, &ctrl->namespaces, list) 4183 blk_mq_quiesce_queue(ns->queue); 4184 up_read(&ctrl->namespaces_rwsem); 4185 } 4186 EXPORT_SYMBOL_GPL(nvme_stop_queues); 4187 4188 void nvme_start_queues(struct nvme_ctrl *ctrl) 4189 { 4190 struct nvme_ns *ns; 4191 4192 down_read(&ctrl->namespaces_rwsem); 4193 list_for_each_entry(ns, &ctrl->namespaces, list) 4194 blk_mq_unquiesce_queue(ns->queue); 4195 up_read(&ctrl->namespaces_rwsem); 4196 } 4197 EXPORT_SYMBOL_GPL(nvme_start_queues); 4198 4199 4200 void nvme_sync_queues(struct nvme_ctrl *ctrl) 4201 { 4202 struct nvme_ns *ns; 4203 4204 down_read(&ctrl->namespaces_rwsem); 4205 list_for_each_entry(ns, &ctrl->namespaces, list) 4206 blk_sync_queue(ns->queue); 4207 up_read(&ctrl->namespaces_rwsem); 4208 4209 if (ctrl->admin_q) 4210 blk_sync_queue(ctrl->admin_q); 4211 } 4212 EXPORT_SYMBOL_GPL(nvme_sync_queues); 4213 4214 /* 4215 * Check we didn't inadvertently grow the command structure sizes: 4216 */ 4217 static inline void _nvme_check_size(void) 4218 { 4219 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); 4220 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 4221 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); 4222 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 4223 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); 4224 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 4225 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); 4226 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); 4227 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 4228 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); 4229 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 4230 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 4231 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 4232 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 4233 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 4234 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 4235 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); 4236 } 4237 4238 4239 static int __init nvme_core_init(void) 4240 { 4241 int result = -ENOMEM; 4242 4243 _nvme_check_size(); 4244 4245 nvme_wq = alloc_workqueue("nvme-wq", 4246 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4247 if (!nvme_wq) 4248 goto out; 4249 4250 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", 4251 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4252 if (!nvme_reset_wq) 4253 goto destroy_wq; 4254 4255 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", 4256 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4257 if (!nvme_delete_wq) 4258 goto destroy_reset_wq; 4259 4260 result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme"); 4261 if (result < 0) 4262 goto destroy_delete_wq; 4263 4264 nvme_class = class_create(THIS_MODULE, "nvme"); 4265 if (IS_ERR(nvme_class)) { 4266 result = PTR_ERR(nvme_class); 4267 goto unregister_chrdev; 4268 } 4269 nvme_class->dev_uevent = nvme_class_uevent; 4270 4271 nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem"); 4272 if (IS_ERR(nvme_subsys_class)) { 4273 result = PTR_ERR(nvme_subsys_class); 4274 goto destroy_class; 4275 } 4276 return 0; 4277 4278 destroy_class: 4279 class_destroy(nvme_class); 4280 unregister_chrdev: 4281 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 4282 destroy_delete_wq: 4283 destroy_workqueue(nvme_delete_wq); 4284 destroy_reset_wq: 4285 destroy_workqueue(nvme_reset_wq); 4286 destroy_wq: 4287 destroy_workqueue(nvme_wq); 4288 out: 4289 return result; 4290 } 4291 4292 static void __exit nvme_core_exit(void) 4293 { 4294 class_destroy(nvme_subsys_class); 4295 class_destroy(nvme_class); 4296 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 4297 destroy_workqueue(nvme_delete_wq); 4298 destroy_workqueue(nvme_reset_wq); 4299 destroy_workqueue(nvme_wq); 4300 } 4301 4302 MODULE_LICENSE("GPL"); 4303 MODULE_VERSION("1.0"); 4304 module_init(nvme_core_init); 4305 module_exit(nvme_core_exit); 4306