// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

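/*
 * Illustrative usage, not part of the driver itself: all of the parameters
 * above use mode 0644, so (assuming the usual nvme-core module packaging)
 * they can be set at load time, e.g.
 *
 *	modprobe nvme_core io_timeout=60 max_retries=3
 *
 * and read or changed afterwards via /sys/module/nvme_core/parameters/.
 */
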
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	/*
	 * Revalidating a dead namespace sets capacity to 0. This will end
	 * buffered writers dirtying pages that can't be synced.
	 */
	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;
	blk_set_queue_dying(ns->queue);
	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ns->queue);
	/*
	 * Revalidate after unblocking dispatchers that may be holding bd_mutex
	 */
	revalidate_disk(ns->disk);
}

static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes, since
	 * ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	default:
		return BLK_STS_IOERR;
	}
}

static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

static void nvme_retry_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (ns && crd)
		delay = ns->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

void nvme_complete_rq(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	trace_nvme_complete_rq(req);

	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
		if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
			return;

		if (!blk_queue_dying(req->q)) {
			nvme_retry_req(req);
			return;
		}
	}

	nvme_trace_bio_complete(req, status);
	blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
			    "Cancelling I/O %d", req->tag);

	/* don't abort a completed request */
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	blk_mq_force_complete_rq(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

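/*
 * Summary of the transitions accepted by nvme_change_ctrl_state() below,
 * derived directly from its switch statement (anything else is rejected):
 *
 *	NEW        -> LIVE, RESETTING, CONNECTING
 *	LIVE       -> RESETTING, DELETING
 *	RESETTING  -> LIVE, CONNECTING, DELETING
 *	CONNECTING -> LIVE, DELETING
 *	DELETING   -> DEAD
 */
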
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

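/*
 * A note on queue selection in nvme_alloc_request() below: NVME_QID_ANY
 * lets blk-mq pick any hardware context, while an explicit qid pins the
 * request to one queue. Queue IDs are 1-based for I/O queues while blk-mq
 * hardware contexts are 0-based, which is what the "qid ? qid - 1 : 0"
 * expression translates (qid 0 is defensively mapped to hctx 0).
 */
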
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_clear_nvme_request(req);
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		goto out_disable_stream;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
			 ctrl->nssa);
		goto out_disable_stream;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;

out_disable_stream:
	nvme_disable_streams(ctrl);
	return ret;
}

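/*
 * Worked example for nvme_assign_write_stream() below, assuming the
 * standard enum rw_hint values from <linux/fs.h>: WRITE_LIFE_NOT_SET (0)
 * and WRITE_LIFE_NONE (1) map to stream 0 (no directive), while
 * WRITE_LIFE_SHORT (2) becomes stream 1, WRITE_LIFE_MEDIUM (3) stream 2,
 * and so on after the "streamid--". The selected stream is carried in the
 * upper 16 bits of the dsmgmt dword (DW13 DSPEC) together with
 * NVME_RW_DTYPE_STREAMS in the control field.
 */
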
/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail to allocate our range, fall back to the
		 * controller discard page. If that's also busy, it's safe to
		 * return busy, as we know we can make progress once that's
		 * freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}

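/*
 * Reminder for the length math above and in nvme_setup_rw() below: NVMe
 * NLB-style fields are 0's based. For example, a 4096-byte write on a
 * namespace formatted with 512-byte LBAs (lba_shift == 9) covers 8 blocks
 * and is encoded as (4096 >> 9) - 1 = 7.
 */
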
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides
		 * a metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.
		 * Else we enable the PRACT bit for protection information or
		 * set the namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return BLK_STS_OK;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ns *ns = req->rq_disk->private_data;
		struct page *page = req->special_vec.bv_page;

		if (page == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(page_address(page) + req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	nvme_clear_nvme_request(req);

	memset(cmd, 0, sizeof(*cmd));
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	complete(waiting);
}

static void nvme_execute_rq_polled(struct request_queue *q,
		struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));

	rq->cmd_flags |= REQ_HIPRI;
	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);

	while (!completion_done(&wait)) {
		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	}
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (poll)
		nvme_execute_rq_polled(req->q, NULL, req, at_head);
	else
		blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		bio->bi_disk = disk;
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer,
					meta_len, meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ret;
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
			status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
			BLK_MQ_REQ_RESERVED, NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

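/*
 * Timing note for nvme_keep_alive_work() below, with illustrative numbers:
 * if KATO = 15 seconds, ka_work fires every 15 seconds and sends a Keep
 * Alive command. When the controller advertises TBKAS (traffic based
 * keep-alive) and a command completed during the last interval (comp_seen),
 * the command is skipped and the timer is simply re-armed.
 */
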
static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

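/*
 * Layout handled by the loop in nvme_identify_ns_descs() below: the
 * Identify Namespace Descriptor list is a packed sequence of
 * (nidt, nidl, rsvd[2], payload[nidl]) entries, terminated by a zero nidl.
 * A UUID entry, for example, occupies sizeof(struct nvme_ns_id_desc) +
 * NVME_NIDT_UUID_LEN = 4 + 16 = 20 bytes, which is why the cursor advances
 * by "len + sizeof(*cur)".
 */
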
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
			NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			 "Identify Descriptors failed (%d)\n", status);
		/*
		 * Don't treat an error as fatal, as we potentially already
		 * have an NGUID or EUI-64.
		 */
		if (status > 0 && !(status & NVME_SC_DNR))
			status = 0;
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, ids, cur);
		if (len < 0)
			goto free_data;

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid,
		__le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
			NVME_IDENTIFY_DATA_SIZE);
}

static int nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid, struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		kfree(*id);
	}

	return error;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

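/*
 * Worked example for nvme_set_queue_count() below: the Number of Queues
 * feature uses 0's based counts, with the requested submission queue count
 * in the low word and the completion queue count in the high word. Asking
 * for 8 queues therefore sends dword11 = 0x00070007, and a controller
 * granting 4/4 returns result = 0x00030003, from which
 * min(result & 0xffff, result >> 16) + 1 recovers 4.
 */
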
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = nvme_to_user_ptr(io.metadata);

	if (ns->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			nvme_to_user_ptr(io.addr), length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

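/*
 * Rough meaning of the effects bits consulted below, per the NVMe command
 * effects log: CSUPP means the command is supported, LBCC that it may
 * change logical block content (so namespaces are revalidated afterwards
 * in nvme_passthru_end()), and a non-zero CSE field that the command may
 * require serialized submission/execution, which is why I/O is frozen
 * around such passthru commands. CCC (controller capability change)
 * triggers re-identification, and NIC/NCC trigger a namespace rescan.
 */
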
static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ctrl->effects)
			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (ns->disk && nvme_revalidate_disk(ns->disk))
			nvme_set_queue_dying(ns);
	up_read(&ctrl->namespaces_rwsem);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	/*
	 * Revalidate LBA changes prior to unfreezing. This is necessary to
	 * prevent memory corruption if a logical block size was changed by
	 * this command.
	 */
	if (effects & NVME_CMD_EFFECTS_LBCC)
		nvme_update_formats(ctrl);
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			   struct nvme_passthru_cmd64 __user *ucmd)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &cmd.result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

/*
 * Issue ioctl requests on the first available path. Note that unlike normal
 * block layer requests we will not retry failed requests on another
 * controller.
 */
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
	if (disk->fops == &nvme_ns_head_ops) {
		struct nvme_ns *ns;

		*head = disk->private_data;
		*srcu_idx = srcu_read_lock(&(*head)->srcu);
		ns = nvme_find_path(*head);
		if (!ns)
			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
		return ns;
	}
#endif
	*head = NULL;
	*srcu_idx = -1;
	return disk->private_data;
}

static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
	if (head)
		srcu_read_unlock(&head->srcu, idx);
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
				  void __user *argp,
				  struct nvme_ns_head *head,
				  int srcu_idx)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	nvme_put_ns_from_disk(head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		ret = nvme_user_cmd(ctrl, NULL, argp);
		break;
	case NVME_IOCTL_ADMIN64_CMD:
		ret = nvme_user_cmd64(ctrl, NULL, argp);
		break;
	default:
		ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
		break;
	}
	nvme_put_ctrl(ctrl);
	return ret;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = NULL;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		ret = ns->head->ns_id;
		break;
	case NVME_IOCTL_IO_CMD:
		ret = nvme_user_cmd(ns->ctrl, ns, argp);
		break;
	case NVME_IOCTL_SUBMIT_IO:
		ret = nvme_submit_io(ns, argp);
		break;
	case NVME_IOCTL_IO64_CMD:
		ret = nvme_user_cmd64(ns->ctrl, ns, argp);
		break;
	default:
		if (ns->ndev)
			ret = nvme_nvm_ioctl(ns, cmd, arg);
		else
			ret = -ENOTTY;
	}

	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}

#ifdef CONFIG_COMPAT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));

#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)

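/*
 * Worked example (assuming typical ABIs): on x86-64, sizeof(struct
 * nvme_user_io) is 48 because the struct is padded out to the 8-byte
 * alignment of its __u64 members, while a 32-bit compile packs it into 44
 * bytes. Since that size is encoded into the _IOW() number, 32-bit user
 * space ends up issuing a different ioctl value, which the packed
 * nvme_user_io32 above reproduces for the handler below.
 */
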
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	/*
	 * Corresponds to the difference of NVME_IOCTL_SUBMIT_IO
	 * between 32 bit programs and 64 bit kernel.
	 * The cause is that the results of sizeof(struct nvme_user_io),
	 * which is used to define NVME_IOCTL_SUBMIT_IO, are not the same
	 * between 32 bit and 64 bit compilers.
	 * NVME_IOCTL_SUBMIT_IO32 lets a 64 bit kernel handle
	 * NVME_IOCTL_SUBMIT_IO issued from 32 bit programs.
	 * Other IOCTL numbers are the same between 32 bit and 64 bit, so
	 * there is nothing to do for them.
	 */
	if (cmd == NVME_IOCTL_SUBMIT_IO32)
		return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);

	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif /* CONFIG_COMPAT */

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

#ifdef CONFIG_NVME_MULTIPATH
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(ns->head->disk))
		goto fail;
#endif
	if (!kref_get_unless_zero(&ns->kref))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

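/*
 * Illustrative numbers for nvme_config_discard() below: with 512-byte
 * logical blocks and (hypothetical) streams parameters sws = 8, sgs = 64,
 * the discard granularity becomes 512 * 8 * 64 = 256 KiB; without streams
 * it stays at the logical block size.
 */
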
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, UINT_MAX);
	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
{
	u64 max_blocks;

	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		return;
	/*
	 * Even though the NVMe spec explicitly states that MDTS is not
	 * applicable to Write Zeroes ("The restriction does not apply to
	 * commands that do not transfer data between the host and the
	 * controller (e.g., Write Uncorrectable or Write Zeroes command)."),
	 * be cautious and use the controller's max_hw_sectors value, which
	 * is configured from the MDTS field in nvme_init_identify() when
	 * available, to limit the maximum Write Zeroes size.
	 */
	if (ns->ctrl->max_hw_sectors == UINT_MAX)
		max_blocks = (u64)USHRT_MAX + 1;
	else
		max_blocks = ns->ctrl->max_hw_sectors + 1;

	blk_queue_max_write_zeroes_sectors(disk->queue,
					   nvme_lba_to_sect(ns, max_blocks));
}

static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
	memset(ids, 0, sizeof(*ids));

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
	if (ctrl->vs >= NVME_VS(1, 3, 0))
		return nvme_identify_ns_descs(ctrl, nsid, ids);
	return 0;
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
}

static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
				 u32 *phys_bs, u32 *io_opt)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		*phys_bs = ns->sws * (1 << ns->lba_shift);
		if (ns->sgs)
			*io_opt = *phys_bs * ns->sgs;
	}

	return 0;
}

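/*
 * Example for the I/O size plumbing in nvme_update_disk_info() below,
 * using made-up identify data: with 512-byte LBAs, npwg = 7 and nows = 63
 * (both 0's based) yield a reported physical block size of 512 * 8 = 4096
 * bytes and an optimal I/O size of 512 * 64 = 32768 bytes. atomic_bs is
 * derived the same way from (N)AWUPF, and the exported physical block
 * size is min(phys_bs, atomic_bs).
 */
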
static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt = 0;

	if (ns->lba_shift > PAGE_SHIFT) {
		/* unsupported block size, set capacity to 0 later */
		bs = (1 << 9);
	}
	blk_mq_freeze_queue(disk->queue);
	blk_integrity_unregister(disk);

	atomic_bs = phys_bs = bs;
	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}

	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
		/* NPWG = Namespace Preferred Write Granularity */
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
		/* NOWS = Namespace Optimal Write Size */
		io_opt = bs * (1 + le16_to_cpu(id->nows));
	}

	blk_queue_logical_block_size(disk->queue, bs);
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
	if (ns->lba_shift > PAGE_SHIFT)
		capacity = 0;

	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
			nvme_init_integrity(disk, ns->ms, ns->pi_type,
					    ns->ctrl->max_integrity_segments);
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

	set_capacity_revalidate_and_notify(disk, capacity, false);

	nvme_config_discard(disk, ns);
	nvme_config_write_zeroes(disk, ns);

	if (id->nsattr & NVME_NS_ATTR_RO)
		set_disk_ro(disk, true);
	else
		set_disk_ro(disk, false);

	blk_mq_unfreeze_queue(disk->queue);
}

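/*
 * Note for __nvme_revalidate_disk() below: the "ds" field of the selected
 * LBA format is the log2 of the LBA size, so ds = 9 means 512-byte and
 * ds = 12 means 4096-byte blocks. ds = 0 (e.g. after a failed identify)
 * falls back to 512 bytes so a zero-capacity disk can still be presented.
 */
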
1955 */ 1956 if (id->flbas & NVME_NS_FLBAS_META_EXT) { 1957 ns->features |= NVME_NS_EXT_LBAS; 1958 if ((ctrl->ops->flags & NVME_F_FABRICS) && 1959 (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) && 1960 ctrl->max_integrity_segments) 1961 ns->features |= NVME_NS_METADATA_SUPPORTED; 1962 } else { 1963 if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS)) 1964 return -EINVAL; 1965 if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) 1966 ns->features |= NVME_NS_METADATA_SUPPORTED; 1967 } 1968 } 1969 1970 if (iob) 1971 blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob)); 1972 nvme_update_disk_info(disk, ns, id); 1973 #ifdef CONFIG_NVME_MULTIPATH 1974 if (ns->head->disk) { 1975 nvme_update_disk_info(ns->head->disk, ns, id); 1976 blk_queue_stack_limits(ns->head->disk->queue, ns->queue); 1977 revalidate_disk(ns->head->disk); 1978 } 1979 #endif 1980 return 0; 1981 } 1982 1983 static int nvme_revalidate_disk(struct gendisk *disk) 1984 { 1985 struct nvme_ns *ns = disk->private_data; 1986 struct nvme_ctrl *ctrl = ns->ctrl; 1987 struct nvme_id_ns *id; 1988 struct nvme_ns_ids ids; 1989 int ret = 0; 1990 1991 if (test_bit(NVME_NS_DEAD, &ns->flags)) { 1992 set_capacity(disk, 0); 1993 return -ENODEV; 1994 } 1995 1996 ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id); 1997 if (ret) 1998 goto out; 1999 2000 if (id->ncap == 0) { 2001 ret = -ENODEV; 2002 goto free_id; 2003 } 2004 2005 ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids); 2006 if (ret) 2007 goto free_id; 2008 2009 if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) { 2010 dev_err(ctrl->device, 2011 "identifiers changed for nsid %d\n", ns->head->ns_id); 2012 ret = -ENODEV; 2013 goto free_id; 2014 } 2015 2016 ret = __nvme_revalidate_disk(disk, id); 2017 free_id: 2018 kfree(id); 2019 out: 2020 /* 2021 * Only fail the function if we got a fatal error back from the 2022 * device, otherwise ignore the error and just move on. 2023 */ 2024 if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR))) 2025 ret = 0; 2026 else if (ret > 0) 2027 ret = blk_status_to_errno(nvme_error_status(ret)); 2028 return ret; 2029 } 2030 2031 static char nvme_pr_type(enum pr_type type) 2032 { 2033 switch (type) { 2034 case PR_WRITE_EXCLUSIVE: 2035 return 1; 2036 case PR_EXCLUSIVE_ACCESS: 2037 return 2; 2038 case PR_WRITE_EXCLUSIVE_REG_ONLY: 2039 return 3; 2040 case PR_EXCLUSIVE_ACCESS_REG_ONLY: 2041 return 4; 2042 case PR_WRITE_EXCLUSIVE_ALL_REGS: 2043 return 5; 2044 case PR_EXCLUSIVE_ACCESS_ALL_REGS: 2045 return 6; 2046 default: 2047 return 0; 2048 } 2049 }; 2050 2051 static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 2052 u64 key, u64 sa_key, u8 op) 2053 { 2054 struct nvme_ns_head *head = NULL; 2055 struct nvme_ns *ns; 2056 struct nvme_command c; 2057 int srcu_idx, ret; 2058 u8 data[16] = { 0, }; 2059 2060 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); 2061 if (unlikely(!ns)) 2062 return -EWOULDBLOCK; 2063 2064 put_unaligned_le64(key, &data[0]); 2065 put_unaligned_le64(sa_key, &data[8]); 2066 2067 memset(&c, 0, sizeof(c)); 2068 c.common.opcode = op; 2069 c.common.nsid = cpu_to_le32(ns->head->ns_id); 2070 c.common.cdw10 = cpu_to_le32(cdw10); 2071 2072 ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16); 2073 nvme_put_ns_from_disk(head, srcu_idx); 2074 return ret; 2075 } 2076 2077 static int nvme_pr_register(struct block_device *bdev, u64 old, 2078 u64 new, unsigned flags) 2079 { 2080 u32 cdw10; 2081 2082 if (flags & ~PR_FL_IGNORE_KEY) 2083 return -EOPNOTSUPP; 2084 2085 cdw10 = old ? 2 : 0; 2086 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 
1 << 3 : 0; 2087 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 2088 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 2089 } 2090 2091 static int nvme_pr_reserve(struct block_device *bdev, u64 key, 2092 enum pr_type type, unsigned flags) 2093 { 2094 u32 cdw10; 2095 2096 if (flags & ~PR_FL_IGNORE_KEY) 2097 return -EOPNOTSUPP; 2098 2099 cdw10 = nvme_pr_type(type) << 8; 2100 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); 2101 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); 2102 } 2103 2104 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, 2105 enum pr_type type, bool abort) 2106 { 2107 u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); 2108 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); 2109 } 2110 2111 static int nvme_pr_clear(struct block_device *bdev, u64 key) 2112 { 2113 u32 cdw10 = 1 | (key ? 1 << 3 : 0); 2114 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); 2115 } 2116 2117 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 2118 { 2119 u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); 2120 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2121 } 2122 2123 static const struct pr_ops nvme_pr_ops = { 2124 .pr_register = nvme_pr_register, 2125 .pr_reserve = nvme_pr_reserve, 2126 .pr_release = nvme_pr_release, 2127 .pr_preempt = nvme_pr_preempt, 2128 .pr_clear = nvme_pr_clear, 2129 }; 2130 2131 #ifdef CONFIG_BLK_SED_OPAL 2132 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2133 bool send) 2134 { 2135 struct nvme_ctrl *ctrl = data; 2136 struct nvme_command cmd; 2137 2138 memset(&cmd, 0, sizeof(cmd)); 2139 if (send) 2140 cmd.common.opcode = nvme_admin_security_send; 2141 else 2142 cmd.common.opcode = nvme_admin_security_recv; 2143 cmd.common.nsid = 0; 2144 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2145 cmd.common.cdw11 = cpu_to_le32(len); 2146 2147 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 2148 ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false); 2149 } 2150 EXPORT_SYMBOL_GPL(nvme_sec_submit); 2151 #endif /* CONFIG_BLK_SED_OPAL */ 2152 2153 static const struct block_device_operations nvme_fops = { 2154 .owner = THIS_MODULE, 2155 .ioctl = nvme_ioctl, 2156 .compat_ioctl = nvme_compat_ioctl, 2157 .open = nvme_open, 2158 .release = nvme_release, 2159 .getgeo = nvme_getgeo, 2160 .revalidate_disk= nvme_revalidate_disk, 2161 .pr_ops = &nvme_pr_ops, 2162 }; 2163 2164 #ifdef CONFIG_NVME_MULTIPATH 2165 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode) 2166 { 2167 struct nvme_ns_head *head = bdev->bd_disk->private_data; 2168 2169 if (!kref_get_unless_zero(&head->ref)) 2170 return -ENXIO; 2171 return 0; 2172 } 2173 2174 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) 2175 { 2176 nvme_put_ns_head(disk->private_data); 2177 } 2178 2179 const struct block_device_operations nvme_ns_head_ops = { 2180 .owner = THIS_MODULE, 2181 .open = nvme_ns_head_open, 2182 .release = nvme_ns_head_release, 2183 .ioctl = nvme_ioctl, 2184 .compat_ioctl = nvme_compat_ioctl, 2185 .getgeo = nvme_getgeo, 2186 .pr_ops = &nvme_pr_ops, 2187 }; 2188 #endif /* CONFIG_NVME_MULTIPATH */ 2189 2190 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) 2191 { 2192 unsigned long timeout = 2193 ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; 2194 u32 csts, bit = enabled ? 
NVME_CSTS_RDY : 0; 2195 int ret; 2196 2197 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2198 if (csts == ~0) 2199 return -ENODEV; 2200 if ((csts & NVME_CSTS_RDY) == bit) 2201 break; 2202 2203 usleep_range(1000, 2000); 2204 if (fatal_signal_pending(current)) 2205 return -EINTR; 2206 if (time_after(jiffies, timeout)) { 2207 dev_err(ctrl->device, 2208 "Device not ready; aborting %s, CSTS=0x%x\n", 2209 enabled ? "initialisation" : "reset", csts); 2210 return -ENODEV; 2211 } 2212 } 2213 2214 return ret; 2215 } 2216 2217 /* 2218 * If the device has been passed off to us in an enabled state, just clear 2219 * the enabled bit. The spec says we should set the 'shutdown notification 2220 * bits', but doing so may cause the device to complete commands to the 2221 * admin queue ... and we don't know what memory that might be pointing at! 2222 */ 2223 int nvme_disable_ctrl(struct nvme_ctrl *ctrl) 2224 { 2225 int ret; 2226 2227 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2228 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2229 2230 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2231 if (ret) 2232 return ret; 2233 2234 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2235 msleep(NVME_QUIRK_DELAY_AMOUNT); 2236 2237 return nvme_wait_ready(ctrl, ctrl->cap, false); 2238 } 2239 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2240 2241 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2242 { 2243 /* 2244 * Default to a 4K page size, with the intention to update this 2245 * path in the future to accommodate architectures with differing 2246 * kernel and IO page sizes. 2247 */ 2248 unsigned dev_page_min, page_shift = 12; 2249 int ret; 2250 2251 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2252 if (ret) { 2253 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2254 return ret; 2255 } 2256 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2257 2258 if (page_shift < dev_page_min) { 2259 dev_err(ctrl->device, 2260 "Minimum device page size %u too large for host (%u)\n", 2261 1 << dev_page_min, 1 << page_shift); 2262 return -ENODEV; 2263 } 2264 2265 ctrl->page_size = 1 << page_shift; 2266 2267 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2268 ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; 2269 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2270 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2271 ctrl->ctrl_config |= NVME_CC_ENABLE; 2272 2273 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2274 if (ret) 2275 return ret; 2276 return nvme_wait_ready(ctrl, ctrl->cap, true); 2277 } 2278 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2279 2280 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) 2281 { 2282 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); 2283 u32 csts; 2284 int ret; 2285 2286 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2287 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2288 2289 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2290 if (ret) 2291 return ret; 2292 2293 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2294 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) 2295 break; 2296 2297 msleep(100); 2298 if (fatal_signal_pending(current)) 2299 return -EINTR; 2300 if (time_after(jiffies, timeout)) { 2301 dev_err(ctrl->device, 2302 "Device shutdown incomplete; abort shutdown\n"); 2303 return -ENODEV; 2304 } 2305 } 2306 2307 return ret; 2308 } 2309 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); 2310 2311 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, 2312 struct request_queue
*q) 2313 { 2314 bool vwc = false; 2315 2316 if (ctrl->max_hw_sectors) { 2317 u32 max_segments = 2318 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; 2319 2320 max_segments = min_not_zero(max_segments, ctrl->max_segments); 2321 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 2322 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 2323 } 2324 blk_queue_virt_boundary(q, ctrl->page_size - 1); 2325 blk_queue_dma_alignment(q, 7); 2326 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) 2327 vwc = true; 2328 blk_queue_write_cache(q, vwc, vwc); 2329 } 2330 2331 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2332 { 2333 __le64 ts; 2334 int ret; 2335 2336 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2337 return 0; 2338 2339 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2340 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2341 NULL); 2342 if (ret) 2343 dev_warn_once(ctrl->device, 2344 "could not set timestamp (%d)\n", ret); 2345 return ret; 2346 } 2347 2348 static int nvme_configure_acre(struct nvme_ctrl *ctrl) 2349 { 2350 struct nvme_feat_host_behavior *host; 2351 int ret; 2352 2353 /* Don't bother enabling the feature if retry delay is not reported */ 2354 if (!ctrl->crdt[0]) 2355 return 0; 2356 2357 host = kzalloc(sizeof(*host), GFP_KERNEL); 2358 if (!host) 2359 return 0; 2360 2361 host->acre = NVME_ENABLE_ACRE; 2362 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2363 host, sizeof(*host), NULL); 2364 kfree(host); 2365 return ret; 2366 } 2367 2368 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2369 { 2370 /* 2371 * APST (Autonomous Power State Transition) lets us program a 2372 * table of power state transitions that the controller will 2373 * perform automatically. We configure it with a simple 2374 * heuristic: we are willing to spend at most 2% of the time 2375 * transitioning between power states. Therefore, when running 2376 * in any given state, we will enter the next lower-power 2377 * non-operational state after waiting 50 * (enlat + exlat) 2378 * microseconds, as long as that state's exit latency is under 2379 * the requested maximum latency. 2380 * 2381 * We will not autonomously enter any non-operational state for 2382 * which the total latency exceeds ps_max_latency_us. Users 2383 * can set ps_max_latency_us to zero to turn off APST. 2384 */ 2385 2386 unsigned apste; 2387 struct nvme_feat_auto_pst *table; 2388 u64 max_lat_us = 0; 2389 int max_ps = -1; 2390 int ret; 2391 2392 /* 2393 * If APST isn't supported or if we haven't been initialized yet, 2394 * then don't do anything. 2395 */ 2396 if (!ctrl->apsta) 2397 return 0; 2398 2399 if (ctrl->npss > 31) { 2400 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2401 return 0; 2402 } 2403 2404 table = kzalloc(sizeof(*table), GFP_KERNEL); 2405 if (!table) 2406 return 0; 2407 2408 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2409 /* Turn off APST. */ 2410 apste = 0; 2411 dev_dbg(ctrl->device, "APST disabled\n"); 2412 } else { 2413 __le64 target = cpu_to_le64(0); 2414 int state; 2415 2416 /* 2417 * Walk through all states from lowest- to highest-power. 2418 * According to the spec, lower-numbered states use more 2419 * power. NPSS, despite the name, is the index of the 2420 * lowest-power state, not the number of states. 
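* As a worked example (latency numbers are illustrative only): a non-operational state with entry_lat = exit_lat = 1000 us has total_latency_us = 2000, so the loop below programs an idle timeout of (2000 + 19) / 20 = 100 ms, i.e. 50 * (enlat + exlat); the state is skipped whenever its exit latency alone exceeds ps_max_latency_us.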
2421 */ 2422 for (state = (int)ctrl->npss; state >= 0; state--) { 2423 u64 total_latency_us, exit_latency_us, transition_ms; 2424 2425 if (target) 2426 table->entries[state] = target; 2427 2428 /* 2429 * Don't allow transitions to the deepest state 2430 * if it's quirked off. 2431 */ 2432 if (state == ctrl->npss && 2433 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2434 continue; 2435 2436 /* 2437 * Is this state a useful non-operational state for 2438 * higher-power states to autonomously transition to? 2439 */ 2440 if (!(ctrl->psd[state].flags & 2441 NVME_PS_FLAGS_NON_OP_STATE)) 2442 continue; 2443 2444 exit_latency_us = 2445 (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2446 if (exit_latency_us > ctrl->ps_max_latency_us) 2447 continue; 2448 2449 total_latency_us = 2450 exit_latency_us + 2451 le32_to_cpu(ctrl->psd[state].entry_lat); 2452 2453 /* 2454 * This state is good. Use it as the APST idle 2455 * target for higher power states. 2456 */ 2457 transition_ms = total_latency_us + 19; 2458 do_div(transition_ms, 20); 2459 if (transition_ms > (1 << 24) - 1) 2460 transition_ms = (1 << 24) - 1; 2461 2462 target = cpu_to_le64((state << 3) | 2463 (transition_ms << 8)); 2464 2465 if (max_ps == -1) 2466 max_ps = state; 2467 2468 if (total_latency_us > max_lat_us) 2469 max_lat_us = total_latency_us; 2470 } 2471 2472 apste = 1; 2473 2474 if (max_ps == -1) { 2475 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2476 } else { 2477 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2478 max_ps, max_lat_us, (int)sizeof(*table), table); 2479 } 2480 } 2481 2482 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2483 table, sizeof(*table), NULL); 2484 if (ret) 2485 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2486 2487 kfree(table); 2488 return ret; 2489 } 2490 2491 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2492 { 2493 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2494 u64 latency; 2495 2496 switch (val) { 2497 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2498 case PM_QOS_LATENCY_ANY: 2499 latency = U64_MAX; 2500 break; 2501 2502 default: 2503 latency = val; 2504 } 2505 2506 if (ctrl->ps_max_latency_us != latency) { 2507 ctrl->ps_max_latency_us = latency; 2508 nvme_configure_apst(ctrl); 2509 } 2510 } 2511 2512 struct nvme_core_quirk_entry { 2513 /* 2514 * NVMe model and firmware strings are padded with spaces. For 2515 * simplicity, strings in the quirk table are padded with NULLs 2516 * instead. 2517 */ 2518 u16 vid; 2519 const char *mn; 2520 const char *fr; 2521 unsigned long quirks; 2522 }; 2523 2524 static const struct nvme_core_quirk_entry core_quirks[] = { 2525 { 2526 /* 2527 * This Toshiba device seems to die when using any APST states. See: 2528 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2529 */ 2530 .vid = 0x1179, 2531 .mn = "THNSF5256GPUK TOSHIBA", 2532 .quirks = NVME_QUIRK_NO_APST, 2533 }, 2534 { 2535 /* 2536 * This LiteON CL1-3D*-Q11 firmware version has a race 2537 * condition associated with actions related to suspend to idle. 2538 * LiteON has resolved the problem in a future firmware release. 2539 */ 2540 .vid = 0x14a4, 2541 .fr = "22301111", 2542 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2543 } 2544 }; 2545 2546 /* match is null-terminated but idstr is space-padded.
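* For example (strings are illustrative): match "22301111" accepts an idstr of "22301111" padded with trailing spaces, but rejects "223011110" or any idstr with other trailing bytes.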
*/ 2547 static bool string_matches(const char *idstr, const char *match, size_t len) 2548 { 2549 size_t matchlen; 2550 2551 if (!match) 2552 return true; 2553 2554 matchlen = strlen(match); 2555 WARN_ON_ONCE(matchlen > len); 2556 2557 if (memcmp(idstr, match, matchlen)) 2558 return false; 2559 2560 for (; matchlen < len; matchlen++) 2561 if (idstr[matchlen] != ' ') 2562 return false; 2563 2564 return true; 2565 } 2566 2567 static bool quirk_matches(const struct nvme_id_ctrl *id, 2568 const struct nvme_core_quirk_entry *q) 2569 { 2570 return q->vid == le16_to_cpu(id->vid) && 2571 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2572 string_matches(id->fr, q->fr, sizeof(id->fr)); 2573 } 2574 2575 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2576 struct nvme_id_ctrl *id) 2577 { 2578 size_t nqnlen; 2579 int off; 2580 2581 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2582 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2583 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2584 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2585 return; 2586 } 2587 2588 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2589 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2590 } 2591 2592 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2593 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2594 "nqn.2014.08.org.nvmexpress:%04x%04x", 2595 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2596 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2597 off += sizeof(id->sn); 2598 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2599 off += sizeof(id->mn); 2600 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2601 } 2602 2603 static void nvme_release_subsystem(struct device *dev) 2604 { 2605 struct nvme_subsystem *subsys = 2606 container_of(dev, struct nvme_subsystem, dev); 2607 2608 if (subsys->instance >= 0) 2609 ida_simple_remove(&nvme_instance_ida, subsys->instance); 2610 kfree(subsys); 2611 } 2612 2613 static void nvme_destroy_subsystem(struct kref *ref) 2614 { 2615 struct nvme_subsystem *subsys = 2616 container_of(ref, struct nvme_subsystem, ref); 2617 2618 mutex_lock(&nvme_subsystems_lock); 2619 list_del(&subsys->entry); 2620 mutex_unlock(&nvme_subsystems_lock); 2621 2622 ida_destroy(&subsys->ns_ida); 2623 device_del(&subsys->dev); 2624 put_device(&subsys->dev); 2625 } 2626 2627 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2628 { 2629 kref_put(&subsys->ref, nvme_destroy_subsystem); 2630 } 2631 2632 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2633 { 2634 struct nvme_subsystem *subsys; 2635 2636 lockdep_assert_held(&nvme_subsystems_lock); 2637 2638 /* 2639 * Fail matches for discovery subsystems. This results 2640 * in each discovery controller being bound to a unique subsystem. 2641 * This avoids issues with validating controller values 2642 * that can only be true when there is a single unique subsystem. 2643 * There may be multiple and completely independent entities 2644 * that provide discovery controllers.
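* (The well-known discovery subsystem NQN, NVME_DISC_SUBSYS_NAME, is "nqn.2014-08.org.nvmexpress.discovery".)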
2645 */ 2646 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2647 return NULL; 2648 2649 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2650 if (strcmp(subsys->subnqn, subsysnqn)) 2651 continue; 2652 if (!kref_get_unless_zero(&subsys->ref)) 2653 continue; 2654 return subsys; 2655 } 2656 2657 return NULL; 2658 } 2659 2660 #define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2661 struct device_attribute subsys_attr_##_name = \ 2662 __ATTR(_name, _mode, _show, NULL) 2663 2664 static ssize_t nvme_subsys_show_nqn(struct device *dev, 2665 struct device_attribute *attr, 2666 char *buf) 2667 { 2668 struct nvme_subsystem *subsys = 2669 container_of(dev, struct nvme_subsystem, dev); 2670 2671 return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); 2672 } 2673 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2674 2675 #define nvme_subsys_show_str_function(field) \ 2676 static ssize_t subsys_##field##_show(struct device *dev, \ 2677 struct device_attribute *attr, char *buf) \ 2678 { \ 2679 struct nvme_subsystem *subsys = \ 2680 container_of(dev, struct nvme_subsystem, dev); \ 2681 return sprintf(buf, "%.*s\n", \ 2682 (int)sizeof(subsys->field), subsys->field); \ 2683 } \ 2684 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2685 2686 nvme_subsys_show_str_function(model); 2687 nvme_subsys_show_str_function(serial); 2688 nvme_subsys_show_str_function(firmware_rev); 2689 2690 static struct attribute *nvme_subsys_attrs[] = { 2691 &subsys_attr_model.attr, 2692 &subsys_attr_serial.attr, 2693 &subsys_attr_firmware_rev.attr, 2694 &subsys_attr_subsysnqn.attr, 2695 #ifdef CONFIG_NVME_MULTIPATH 2696 &subsys_attr_iopolicy.attr, 2697 #endif 2698 NULL, 2699 }; 2700 2701 static struct attribute_group nvme_subsys_attrs_group = { 2702 .attrs = nvme_subsys_attrs, 2703 }; 2704 2705 static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2706 &nvme_subsys_attrs_group, 2707 NULL, 2708 }; 2709 2710 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2711 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2712 { 2713 struct nvme_ctrl *tmp; 2714 2715 lockdep_assert_held(&nvme_subsystems_lock); 2716 2717 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2718 if (nvme_state_terminal(tmp)) 2719 continue; 2720 2721 if (tmp->cntlid == ctrl->cntlid) { 2722 dev_err(ctrl->device, 2723 "Duplicate cntlid %u with %s, rejecting\n", 2724 ctrl->cntlid, dev_name(tmp->device)); 2725 return false; 2726 } 2727 2728 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 2729 (ctrl->opts && ctrl->opts->discovery_nqn)) 2730 continue; 2731 2732 dev_err(ctrl->device, 2733 "Subsystem does not support multiple controllers\n"); 2734 return false; 2735 } 2736 2737 return true; 2738 } 2739 2740 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2741 { 2742 struct nvme_subsystem *subsys, *found; 2743 int ret; 2744 2745 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2746 if (!subsys) 2747 return -ENOMEM; 2748 2749 subsys->instance = -1; 2750 mutex_init(&subsys->lock); 2751 kref_init(&subsys->ref); 2752 INIT_LIST_HEAD(&subsys->ctrls); 2753 INIT_LIST_HEAD(&subsys->nsheads); 2754 nvme_init_subnqn(subsys, ctrl, id); 2755 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2756 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2757 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); 2758 subsys->vendor_id = le16_to_cpu(id->vid); 2759 subsys->cmic = id->cmic; 2760 subsys->awupf = le16_to_cpu(id->awupf); 2761 #ifdef CONFIG_NVME_MULTIPATH 2762 subsys->iopolicy 
= NVME_IOPOLICY_NUMA; 2763 #endif 2764 2765 subsys->dev.class = nvme_subsys_class; 2766 subsys->dev.release = nvme_release_subsystem; 2767 subsys->dev.groups = nvme_subsys_attrs_groups; 2768 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 2769 device_initialize(&subsys->dev); 2770 2771 mutex_lock(&nvme_subsystems_lock); 2772 found = __nvme_find_get_subsystem(subsys->subnqn); 2773 if (found) { 2774 put_device(&subsys->dev); 2775 subsys = found; 2776 2777 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2778 ret = -EINVAL; 2779 goto out_put_subsystem; 2780 } 2781 } else { 2782 ret = device_add(&subsys->dev); 2783 if (ret) { 2784 dev_err(ctrl->device, 2785 "failed to register subsystem device.\n"); 2786 put_device(&subsys->dev); 2787 goto out_unlock; 2788 } 2789 ida_init(&subsys->ns_ida); 2790 list_add_tail(&subsys->entry, &nvme_subsystems); 2791 } 2792 2793 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2794 dev_name(ctrl->device)); 2795 if (ret) { 2796 dev_err(ctrl->device, 2797 "failed to create sysfs link from subsystem.\n"); 2798 goto out_put_subsystem; 2799 } 2800 2801 if (!found) 2802 subsys->instance = ctrl->instance; 2803 ctrl->subsys = subsys; 2804 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2805 mutex_unlock(&nvme_subsystems_lock); 2806 return 0; 2807 2808 out_put_subsystem: 2809 nvme_put_subsystem(subsys); 2810 out_unlock: 2811 mutex_unlock(&nvme_subsystems_lock); 2812 return ret; 2813 } 2814 2815 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, 2816 void *log, size_t size, u64 offset) 2817 { 2818 struct nvme_command c = { }; 2819 u32 dwlen = nvme_bytes_to_numd(size); 2820 2821 c.get_log_page.opcode = nvme_admin_get_log_page; 2822 c.get_log_page.nsid = cpu_to_le32(nsid); 2823 c.get_log_page.lid = log_page; 2824 c.get_log_page.lsp = lsp; 2825 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2826 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2827 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2828 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2829 2830 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2831 } 2832 2833 static int nvme_get_effects_log(struct nvme_ctrl *ctrl) 2834 { 2835 int ret; 2836 2837 if (!ctrl->effects) 2838 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); 2839 2840 if (!ctrl->effects) 2841 return 0; 2842 2843 ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, 2844 ctrl->effects, sizeof(*ctrl->effects), 0); 2845 if (ret) { 2846 kfree(ctrl->effects); 2847 ctrl->effects = NULL; 2848 } 2849 return ret; 2850 } 2851 2852 /* 2853 * Initialize the cached copies of the Identify data and various controller 2854 * registers in our nvme_ctrl structure. This should be called as soon as 2855 * the admin queue is fully up and running.
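* (For instance, the PCIe transport calls it from its reset path once nvme_enable_ctrl() has succeeded and the admin queue is set up.)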
2856 */ 2857 int nvme_init_identify(struct nvme_ctrl *ctrl) 2858 { 2859 struct nvme_id_ctrl *id; 2860 int ret, page_shift; 2861 u32 max_hw_sectors; 2862 bool prev_apst_enabled; 2863 2864 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 2865 if (ret) { 2866 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 2867 return ret; 2868 } 2869 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2870 ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 2871 2872 if (ctrl->vs >= NVME_VS(1, 1, 0)) 2873 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 2874 2875 ret = nvme_identify_ctrl(ctrl, &id); 2876 if (ret) { 2877 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 2878 return -EIO; 2879 } 2880 2881 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 2882 ret = nvme_get_effects_log(ctrl); 2883 if (ret < 0) 2884 goto out_free; 2885 } 2886 2887 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 2888 ctrl->cntlid = le16_to_cpu(id->cntlid); 2889 2890 if (!ctrl->identified) { 2891 int i; 2892 2893 ret = nvme_init_subsystem(ctrl, id); 2894 if (ret) 2895 goto out_free; 2896 2897 /* 2898 * Check for quirks. Quirk can depend on firmware version, 2899 * so, in principle, the set of quirks present can change 2900 * across a reset. As a possible future enhancement, we 2901 * could re-scan for quirks every time we reinitialize 2902 * the device, but we'd have to make sure that the driver 2903 * behaves intelligently if the quirks change. 2904 */ 2905 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 2906 if (quirk_matches(id, &core_quirks[i])) 2907 ctrl->quirks |= core_quirks[i].quirks; 2908 } 2909 } 2910 2911 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 2912 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 2913 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 2914 } 2915 2916 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 2917 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 2918 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 2919 2920 ctrl->oacs = le16_to_cpu(id->oacs); 2921 ctrl->oncs = le16_to_cpu(id->oncs); 2922 ctrl->mtfa = le16_to_cpu(id->mtfa); 2923 ctrl->oaes = le32_to_cpu(id->oaes); 2924 ctrl->wctemp = le16_to_cpu(id->wctemp); 2925 ctrl->cctemp = le16_to_cpu(id->cctemp); 2926 2927 atomic_set(&ctrl->abort_limit, id->acl + 1); 2928 ctrl->vwc = id->vwc; 2929 if (id->mdts) 2930 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2931 else 2932 max_hw_sectors = UINT_MAX; 2933 ctrl->max_hw_sectors = 2934 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 2935 2936 nvme_set_queue_limits(ctrl, ctrl->admin_q); 2937 ctrl->sgls = le32_to_cpu(id->sgls); 2938 ctrl->kas = le16_to_cpu(id->kas); 2939 ctrl->max_namespaces = le32_to_cpu(id->mnan); 2940 ctrl->ctratt = le32_to_cpu(id->ctratt); 2941 2942 if (id->rtd3e) { 2943 /* us -> s */ 2944 u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000; 2945 2946 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 2947 shutdown_timeout, 60); 2948 2949 if (ctrl->shutdown_timeout != shutdown_timeout) 2950 dev_info(ctrl->device, 2951 "Shutdown timeout set to %u seconds\n", 2952 ctrl->shutdown_timeout); 2953 } else 2954 ctrl->shutdown_timeout = shutdown_timeout; 2955 2956 ctrl->npss = id->npss; 2957 ctrl->apsta = id->apsta; 2958 prev_apst_enabled = ctrl->apst_enabled; 2959 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 2960 if (force_apst && id->apsta) { 2961 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 2962 ctrl->apst_enabled = true; 2963 } 
else { 2964 ctrl->apst_enabled = false; 2965 } 2966 } else { 2967 ctrl->apst_enabled = id->apsta; 2968 } 2969 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 2970 2971 if (ctrl->ops->flags & NVME_F_FABRICS) { 2972 ctrl->icdoff = le16_to_cpu(id->icdoff); 2973 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 2974 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 2975 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 2976 2977 /* 2978 * In fabrics we need to verify the cntlid matches the 2979 * admin connect 2980 */ 2981 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 2982 dev_err(ctrl->device, 2983 "Mismatching cntlid: Connect %u vs Identify " 2984 "%u, rejecting\n", 2985 ctrl->cntlid, le16_to_cpu(id->cntlid)); 2986 ret = -EINVAL; 2987 goto out_free; 2988 } 2989 2990 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { 2991 dev_err(ctrl->device, 2992 "keep-alive support is mandatory for fabrics\n"); 2993 ret = -EINVAL; 2994 goto out_free; 2995 } 2996 } else { 2997 ctrl->hmpre = le32_to_cpu(id->hmpre); 2998 ctrl->hmmin = le32_to_cpu(id->hmmin); 2999 ctrl->hmminds = le32_to_cpu(id->hmminds); 3000 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 3001 } 3002 3003 ret = nvme_mpath_init(ctrl, id); 3004 kfree(id); 3005 3006 if (ret < 0) 3007 return ret; 3008 3009 if (ctrl->apst_enabled && !prev_apst_enabled) 3010 dev_pm_qos_expose_latency_tolerance(ctrl->device); 3011 else if (!ctrl->apst_enabled && prev_apst_enabled) 3012 dev_pm_qos_hide_latency_tolerance(ctrl->device); 3013 3014 ret = nvme_configure_apst(ctrl); 3015 if (ret < 0) 3016 return ret; 3017 3018 ret = nvme_configure_timestamp(ctrl); 3019 if (ret < 0) 3020 return ret; 3021 3022 ret = nvme_configure_directives(ctrl); 3023 if (ret < 0) 3024 return ret; 3025 3026 ret = nvme_configure_acre(ctrl); 3027 if (ret < 0) 3028 return ret; 3029 3030 if (!ctrl->identified) 3031 nvme_hwmon_init(ctrl); 3032 3033 ctrl->identified = true; 3034 3035 return 0; 3036 3037 out_free: 3038 kfree(id); 3039 return ret; 3040 } 3041 EXPORT_SYMBOL_GPL(nvme_init_identify); 3042 3043 static int nvme_dev_open(struct inode *inode, struct file *file) 3044 { 3045 struct nvme_ctrl *ctrl = 3046 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3047 3048 switch (ctrl->state) { 3049 case NVME_CTRL_LIVE: 3050 break; 3051 default: 3052 return -EWOULDBLOCK; 3053 } 3054 3055 file->private_data = ctrl; 3056 return 0; 3057 } 3058 3059 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) 3060 { 3061 struct nvme_ns *ns; 3062 int ret; 3063 3064 down_read(&ctrl->namespaces_rwsem); 3065 if (list_empty(&ctrl->namespaces)) { 3066 ret = -ENOTTY; 3067 goto out_unlock; 3068 } 3069 3070 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); 3071 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { 3072 dev_warn(ctrl->device, 3073 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); 3074 ret = -EINVAL; 3075 goto out_unlock; 3076 } 3077 3078 dev_warn(ctrl->device, 3079 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); 3080 kref_get(&ns->kref); 3081 up_read(&ctrl->namespaces_rwsem); 3082 3083 ret = nvme_user_cmd(ctrl, ns, argp); 3084 nvme_put_ns(ns); 3085 return ret; 3086 3087 out_unlock: 3088 up_read(&ctrl->namespaces_rwsem); 3089 return ret; 3090 } 3091 3092 static long nvme_dev_ioctl(struct file *file, unsigned int cmd, 3093 unsigned long arg) 3094 { 3095 struct nvme_ctrl *ctrl = file->private_data; 3096 void __user *argp = (void __user *)arg; 3097 3098 switch (cmd) { 3099 case NVME_IOCTL_ADMIN_CMD: 3100 return nvme_user_cmd(ctrl, NULL, argp); 3101 case 
NVME_IOCTL_ADMIN64_CMD: 3102 return nvme_user_cmd64(ctrl, NULL, argp); 3103 case NVME_IOCTL_IO_CMD: 3104 return nvme_dev_user_cmd(ctrl, argp); 3105 case NVME_IOCTL_RESET: 3106 dev_warn(ctrl->device, "resetting controller\n"); 3107 return nvme_reset_ctrl_sync(ctrl); 3108 case NVME_IOCTL_SUBSYS_RESET: 3109 return nvme_reset_subsystem(ctrl); 3110 case NVME_IOCTL_RESCAN: 3111 nvme_queue_scan(ctrl); 3112 return 0; 3113 default: 3114 return -ENOTTY; 3115 } 3116 } 3117 3118 static const struct file_operations nvme_dev_fops = { 3119 .owner = THIS_MODULE, 3120 .open = nvme_dev_open, 3121 .unlocked_ioctl = nvme_dev_ioctl, 3122 .compat_ioctl = compat_ptr_ioctl, 3123 }; 3124 3125 static ssize_t nvme_sysfs_reset(struct device *dev, 3126 struct device_attribute *attr, const char *buf, 3127 size_t count) 3128 { 3129 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3130 int ret; 3131 3132 ret = nvme_reset_ctrl_sync(ctrl); 3133 if (ret < 0) 3134 return ret; 3135 return count; 3136 } 3137 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 3138 3139 static ssize_t nvme_sysfs_rescan(struct device *dev, 3140 struct device_attribute *attr, const char *buf, 3141 size_t count) 3142 { 3143 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3144 3145 nvme_queue_scan(ctrl); 3146 return count; 3147 } 3148 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 3149 3150 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 3151 { 3152 struct gendisk *disk = dev_to_disk(dev); 3153 3154 if (disk->fops == &nvme_fops) 3155 return nvme_get_ns_from_dev(dev)->head; 3156 else 3157 return disk->private_data; 3158 } 3159 3160 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 3161 char *buf) 3162 { 3163 struct nvme_ns_head *head = dev_to_ns_head(dev); 3164 struct nvme_ns_ids *ids = &head->ids; 3165 struct nvme_subsystem *subsys = head->subsys; 3166 int serial_len = sizeof(subsys->serial); 3167 int model_len = sizeof(subsys->model); 3168 3169 if (!uuid_is_null(&ids->uuid)) 3170 return sprintf(buf, "uuid.%pU\n", &ids->uuid); 3171 3172 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3173 return sprintf(buf, "eui.%16phN\n", ids->nguid); 3174 3175 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3176 return sprintf(buf, "eui.%8phN\n", ids->eui64); 3177 3178 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 3179 subsys->serial[serial_len - 1] == '\0')) 3180 serial_len--; 3181 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 3182 subsys->model[model_len - 1] == '\0')) 3183 model_len--; 3184 3185 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 3186 serial_len, subsys->serial, model_len, subsys->model, 3187 head->ns_id); 3188 } 3189 static DEVICE_ATTR_RO(wwid); 3190 3191 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 3192 char *buf) 3193 { 3194 return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 3195 } 3196 static DEVICE_ATTR_RO(nguid); 3197 3198 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 3199 char *buf) 3200 { 3201 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3202 3203 /* For backward compatibility expose the NGUID to userspace if 3204 * we have no UUID set 3205 */ 3206 if (uuid_is_null(&ids->uuid)) { 3207 printk_ratelimited(KERN_WARNING 3208 "No UUID available providing old NGUID\n"); 3209 return sprintf(buf, "%pU\n", ids->nguid); 3210 } 3211 return sprintf(buf, "%pU\n", &ids->uuid); 3212 } 3213 static DEVICE_ATTR_RO(uuid); 
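/*
 * Example of how these attributes surface in sysfs for a namespace that
 * reports only an NGUID (identifier values are hypothetical):
 *
 *   # cat /sys/block/nvme0n1/wwid
 *   eui.0123456789abcdef0123456789abcdef
 *   # cat /sys/block/nvme0n1/nguid
 *   01234567-89ab-cdef-0123-456789abcdef
 *   # cat /sys/block/nvme0n1/uuid
 *   01234567-89ab-cdef-0123-456789abcdef
 *
 * The uuid read falls back to the NGUID (with a ratelimited warning) when
 * no UUID descriptor is available, as uuid_show() above implements.
 */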
3214 3215 static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 3216 char *buf) 3217 { 3218 return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 3219 } 3220 static DEVICE_ATTR_RO(eui); 3221 3222 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 3223 char *buf) 3224 { 3225 return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id); 3226 } 3227 static DEVICE_ATTR_RO(nsid); 3228 3229 static struct attribute *nvme_ns_id_attrs[] = { 3230 &dev_attr_wwid.attr, 3231 &dev_attr_uuid.attr, 3232 &dev_attr_nguid.attr, 3233 &dev_attr_eui.attr, 3234 &dev_attr_nsid.attr, 3235 #ifdef CONFIG_NVME_MULTIPATH 3236 &dev_attr_ana_grpid.attr, 3237 &dev_attr_ana_state.attr, 3238 #endif 3239 NULL, 3240 }; 3241 3242 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 3243 struct attribute *a, int n) 3244 { 3245 struct device *dev = container_of(kobj, struct device, kobj); 3246 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3247 3248 if (a == &dev_attr_uuid.attr) { 3249 if (uuid_is_null(&ids->uuid) && 3250 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3251 return 0; 3252 } 3253 if (a == &dev_attr_nguid.attr) { 3254 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3255 return 0; 3256 } 3257 if (a == &dev_attr_eui.attr) { 3258 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3259 return 0; 3260 } 3261 #ifdef CONFIG_NVME_MULTIPATH 3262 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { 3263 if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */ 3264 return 0; 3265 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) 3266 return 0; 3267 } 3268 #endif 3269 return a->mode; 3270 } 3271 3272 static const struct attribute_group nvme_ns_id_attr_group = { 3273 .attrs = nvme_ns_id_attrs, 3274 .is_visible = nvme_ns_id_attrs_are_visible, 3275 }; 3276 3277 const struct attribute_group *nvme_ns_id_attr_groups[] = { 3278 &nvme_ns_id_attr_group, 3279 #ifdef CONFIG_NVM 3280 &nvme_nvm_attr_group, 3281 #endif 3282 NULL, 3283 }; 3284 3285 #define nvme_show_str_function(field) \ 3286 static ssize_t field##_show(struct device *dev, \ 3287 struct device_attribute *attr, char *buf) \ 3288 { \ 3289 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3290 return sprintf(buf, "%.*s\n", \ 3291 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 3292 } \ 3293 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3294 3295 nvme_show_str_function(model); 3296 nvme_show_str_function(serial); 3297 nvme_show_str_function(firmware_rev); 3298 3299 #define nvme_show_int_function(field) \ 3300 static ssize_t field##_show(struct device *dev, \ 3301 struct device_attribute *attr, char *buf) \ 3302 { \ 3303 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3304 return sprintf(buf, "%d\n", ctrl->field); \ 3305 } \ 3306 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3307 3308 nvme_show_int_function(cntlid); 3309 nvme_show_int_function(numa_node); 3310 nvme_show_int_function(queue_count); 3311 nvme_show_int_function(sqsize); 3312 3313 static ssize_t nvme_sysfs_delete(struct device *dev, 3314 struct device_attribute *attr, const char *buf, 3315 size_t count) 3316 { 3317 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3318 3319 /* Can't delete non-created controllers */ 3320 if (!ctrl->created) 3321 return -EBUSY; 3322 3323 if (device_remove_file_self(dev, attr)) 3324 nvme_delete_ctrl_sync(ctrl); 3325 return count; 3326 } 3327 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 3328 3329 static ssize_t 
nvme_sysfs_show_transport(struct device *dev, 3330 struct device_attribute *attr, 3331 char *buf) 3332 { 3333 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3334 3335 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); 3336 } 3337 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 3338 3339 static ssize_t nvme_sysfs_show_state(struct device *dev, 3340 struct device_attribute *attr, 3341 char *buf) 3342 { 3343 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3344 static const char *const state_name[] = { 3345 [NVME_CTRL_NEW] = "new", 3346 [NVME_CTRL_LIVE] = "live", 3347 [NVME_CTRL_RESETTING] = "resetting", 3348 [NVME_CTRL_CONNECTING] = "connecting", 3349 [NVME_CTRL_DELETING] = "deleting", 3350 [NVME_CTRL_DEAD] = "dead", 3351 }; 3352 3353 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 3354 state_name[ctrl->state]) 3355 return sprintf(buf, "%s\n", state_name[ctrl->state]); 3356 3357 return sprintf(buf, "unknown state\n"); 3358 } 3359 3360 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 3361 3362 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 3363 struct device_attribute *attr, 3364 char *buf) 3365 { 3366 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3367 3368 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); 3369 } 3370 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 3371 3372 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, 3373 struct device_attribute *attr, 3374 char *buf) 3375 { 3376 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3377 3378 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn); 3379 } 3380 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); 3381 3382 static ssize_t nvme_sysfs_show_hostid(struct device *dev, 3383 struct device_attribute *attr, 3384 char *buf) 3385 { 3386 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3387 3388 return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id); 3389 } 3390 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); 3391 3392 static ssize_t nvme_sysfs_show_address(struct device *dev, 3393 struct device_attribute *attr, 3394 char *buf) 3395 { 3396 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3397 3398 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 3399 } 3400 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 3401 3402 static struct attribute *nvme_dev_attrs[] = { 3403 &dev_attr_reset_controller.attr, 3404 &dev_attr_rescan_controller.attr, 3405 &dev_attr_model.attr, 3406 &dev_attr_serial.attr, 3407 &dev_attr_firmware_rev.attr, 3408 &dev_attr_cntlid.attr, 3409 &dev_attr_delete_controller.attr, 3410 &dev_attr_transport.attr, 3411 &dev_attr_subsysnqn.attr, 3412 &dev_attr_address.attr, 3413 &dev_attr_state.attr, 3414 &dev_attr_numa_node.attr, 3415 &dev_attr_queue_count.attr, 3416 &dev_attr_sqsize.attr, 3417 &dev_attr_hostnqn.attr, 3418 &dev_attr_hostid.attr, 3419 NULL 3420 }; 3421 3422 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 3423 struct attribute *a, int n) 3424 { 3425 struct device *dev = container_of(kobj, struct device, kobj); 3426 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3427 3428 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 3429 return 0; 3430 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 3431 return 0; 3432 if (a == &dev_attr_hostnqn.attr && !ctrl->opts) 3433 return 0; 3434 if (a == &dev_attr_hostid.attr && !ctrl->opts) 3435 return 0; 3436 3437 return a->mode; 3438 } 3439 3440 static struct 
attribute_group nvme_dev_attrs_group = { 3441 .attrs = nvme_dev_attrs, 3442 .is_visible = nvme_dev_attrs_are_visible, 3443 }; 3444 3445 static const struct attribute_group *nvme_dev_attr_groups[] = { 3446 &nvme_dev_attrs_group, 3447 NULL, 3448 }; 3449 3450 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, 3451 unsigned nsid) 3452 { 3453 struct nvme_ns_head *h; 3454 3455 lockdep_assert_held(&subsys->lock); 3456 3457 list_for_each_entry(h, &subsys->nsheads, entry) { 3458 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref)) 3459 return h; 3460 } 3461 3462 return NULL; 3463 } 3464 3465 static int __nvme_check_ids(struct nvme_subsystem *subsys, 3466 struct nvme_ns_head *new) 3467 { 3468 struct nvme_ns_head *h; 3469 3470 lockdep_assert_held(&subsys->lock); 3471 3472 list_for_each_entry(h, &subsys->nsheads, entry) { 3473 if (nvme_ns_ids_valid(&new->ids) && 3474 nvme_ns_ids_equal(&new->ids, &h->ids)) 3475 return -EINVAL; 3476 } 3477 3478 return 0; 3479 } 3480 3481 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 3482 unsigned nsid, struct nvme_ns_ids *ids) 3483 { 3484 struct nvme_ns_head *head; 3485 size_t size = sizeof(*head); 3486 int ret = -ENOMEM; 3487 3488 #ifdef CONFIG_NVME_MULTIPATH 3489 size += num_possible_nodes() * sizeof(struct nvme_ns *); 3490 #endif 3491 3492 head = kzalloc(size, GFP_KERNEL); 3493 if (!head) 3494 goto out; 3495 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); 3496 if (ret < 0) 3497 goto out_free_head; 3498 head->instance = ret; 3499 INIT_LIST_HEAD(&head->list); 3500 ret = init_srcu_struct(&head->srcu); 3501 if (ret) 3502 goto out_ida_remove; 3503 head->subsys = ctrl->subsys; 3504 head->ns_id = nsid; 3505 head->ids = *ids; 3506 kref_init(&head->ref); 3507 3508 ret = __nvme_check_ids(ctrl->subsys, head); 3509 if (ret) { 3510 dev_err(ctrl->device, 3511 "duplicate IDs for nsid %d\n", nsid); 3512 goto out_cleanup_srcu; 3513 } 3514 3515 ret = nvme_mpath_alloc_disk(ctrl, head); 3516 if (ret) 3517 goto out_cleanup_srcu; 3518 3519 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 3520 3521 kref_get(&ctrl->subsys->ref); 3522 3523 return head; 3524 out_cleanup_srcu: 3525 cleanup_srcu_struct(&head->srcu); 3526 out_ida_remove: 3527 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); 3528 out_free_head: 3529 kfree(head); 3530 out: 3531 if (ret > 0) 3532 ret = blk_status_to_errno(nvme_error_status(ret)); 3533 return ERR_PTR(ret); 3534 } 3535 3536 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, 3537 struct nvme_id_ns *id) 3538 { 3539 struct nvme_ctrl *ctrl = ns->ctrl; 3540 bool is_shared = id->nmic & NVME_NS_NMIC_SHARED; 3541 struct nvme_ns_head *head = NULL; 3542 struct nvme_ns_ids ids; 3543 int ret = 0; 3544 3545 ret = nvme_report_ns_ids(ctrl, nsid, id, &ids); 3546 if (ret) { 3547 if (ret < 0) 3548 return ret; 3549 return blk_status_to_errno(nvme_error_status(ret)); 3550 } 3551 3552 mutex_lock(&ctrl->subsys->lock); 3553 head = nvme_find_ns_head(ctrl->subsys, nsid); 3554 if (!head) { 3555 head = nvme_alloc_ns_head(ctrl, nsid, &ids); 3556 if (IS_ERR(head)) { 3557 ret = PTR_ERR(head); 3558 goto out_unlock; 3559 } 3560 head->shared = is_shared; 3561 } else { 3562 ret = -EINVAL; 3563 if (!is_shared || !head->shared) { 3564 dev_err(ctrl->device, 3565 "Duplicate unshared namespace %d\n", nsid); 3566 goto out_put_ns_head; 3567 } 3568 if (!nvme_ns_ids_equal(&head->ids, &ids)) { 3569 dev_err(ctrl->device, 3570 "IDs don't match for shared namespace %d\n", 3571 nsid); 3572 goto out_put_ns_head; 3573 } 
3574 } 3575 3576 list_add_tail(&ns->siblings, &head->list); 3577 ns->head = head; 3578 mutex_unlock(&ctrl->subsys->lock); 3579 return 0; 3580 3581 out_put_ns_head: 3582 nvme_put_ns_head(head); 3583 out_unlock: 3584 mutex_unlock(&ctrl->subsys->lock); 3585 return ret; 3586 } 3587 3588 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) 3589 { 3590 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); 3591 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); 3592 3593 return nsa->head->ns_id - nsb->head->ns_id; 3594 } 3595 3596 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3597 { 3598 struct nvme_ns *ns, *ret = NULL; 3599 3600 down_read(&ctrl->namespaces_rwsem); 3601 list_for_each_entry(ns, &ctrl->namespaces, list) { 3602 if (ns->head->ns_id == nsid) { 3603 if (!kref_get_unless_zero(&ns->kref)) 3604 continue; 3605 ret = ns; 3606 break; 3607 } 3608 if (ns->head->ns_id > nsid) 3609 break; 3610 } 3611 up_read(&ctrl->namespaces_rwsem); 3612 return ret; 3613 } 3614 3615 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3616 { 3617 struct nvme_ns *ns; 3618 struct gendisk *disk; 3619 struct nvme_id_ns *id; 3620 char disk_name[DISK_NAME_LEN]; 3621 int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret; 3622 3623 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3624 if (!ns) 3625 return; 3626 3627 ns->queue = blk_mq_init_queue(ctrl->tagset); 3628 if (IS_ERR(ns->queue)) 3629 goto out_free_ns; 3630 3631 if (ctrl->opts && ctrl->opts->data_digest) 3632 ns->queue->backing_dev_info->capabilities 3633 |= BDI_CAP_STABLE_WRITES; 3634 3635 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 3636 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA) 3637 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 3638 3639 ns->queue->queuedata = ns; 3640 ns->ctrl = ctrl; 3641 3642 kref_init(&ns->kref); 3643 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ 3644 3645 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 3646 nvme_set_queue_limits(ctrl, ns->queue); 3647 3648 ret = nvme_identify_ns(ctrl, nsid, &id); 3649 if (ret) 3650 goto out_free_queue; 3651 3652 if (id->ncap == 0) /* no namespace (legacy quirk) */ 3653 goto out_free_id; 3654 3655 ret = nvme_init_ns_head(ns, nsid, id); 3656 if (ret) 3657 goto out_free_id; 3658 nvme_set_disk_name(disk_name, ns, ctrl, &flags); 3659 3660 disk = alloc_disk_node(0, node); 3661 if (!disk) 3662 goto out_unlink_ns; 3663 3664 disk->fops = &nvme_fops; 3665 disk->private_data = ns; 3666 disk->queue = ns->queue; 3667 disk->flags = flags; 3668 memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); 3669 ns->disk = disk; 3670 3671 if (__nvme_revalidate_disk(disk, id)) 3672 goto out_put_disk; 3673 3674 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { 3675 ret = nvme_nvm_register(ns, disk_name, node); 3676 if (ret) { 3677 dev_warn(ctrl->device, "LightNVM init failure\n"); 3678 goto out_put_disk; 3679 } 3680 } 3681 3682 down_write(&ctrl->namespaces_rwsem); 3683 list_add_tail(&ns->list, &ctrl->namespaces); 3684 up_write(&ctrl->namespaces_rwsem); 3685 3686 nvme_get_ctrl(ctrl); 3687 3688 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups); 3689 3690 nvme_mpath_add_disk(ns, id); 3691 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 3692 kfree(id); 3693 3694 return; 3695 out_put_disk: 3696 /* prevent double queue cleanup */ 3697 ns->disk->queue = NULL; 3698 put_disk(ns->disk); 3699 out_unlink_ns: 3700 mutex_lock(&ctrl->subsys->lock); 3701 
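/* Roll back the head linkage established by nvme_init_ns_head() above */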
list_del_rcu(&ns->siblings); 3702 if (list_empty(&ns->head->list)) 3703 list_del_init(&ns->head->entry); 3704 mutex_unlock(&ctrl->subsys->lock); 3705 nvme_put_ns_head(ns->head); 3706 out_free_id: 3707 kfree(id); 3708 out_free_queue: 3709 blk_cleanup_queue(ns->queue); 3710 out_free_ns: 3711 kfree(ns); 3712 } 3713 3714 static void nvme_ns_remove(struct nvme_ns *ns) 3715 { 3716 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 3717 return; 3718 3719 nvme_fault_inject_fini(&ns->fault_inject); 3720 3721 mutex_lock(&ns->ctrl->subsys->lock); 3722 list_del_rcu(&ns->siblings); 3723 if (list_empty(&ns->head->list)) 3724 list_del_init(&ns->head->entry); 3725 mutex_unlock(&ns->ctrl->subsys->lock); 3726 3727 synchronize_rcu(); /* guarantee not available in head->list */ 3728 nvme_mpath_clear_current_path(ns); 3729 synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ 3730 3731 if (ns->disk && ns->disk->flags & GENHD_FL_UP) { 3732 del_gendisk(ns->disk); 3733 blk_cleanup_queue(ns->queue); 3734 if (blk_get_integrity(ns->disk)) 3735 blk_integrity_unregister(ns->disk); 3736 } 3737 3738 down_write(&ns->ctrl->namespaces_rwsem); 3739 list_del_init(&ns->list); 3740 up_write(&ns->ctrl->namespaces_rwsem); 3741 3742 nvme_mpath_check_last_path(ns); 3743 nvme_put_ns(ns); 3744 } 3745 3746 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) 3747 { 3748 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); 3749 3750 if (ns) { 3751 nvme_ns_remove(ns); 3752 nvme_put_ns(ns); 3753 } 3754 } 3755 3756 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3757 { 3758 struct nvme_ns *ns; 3759 3760 ns = nvme_find_get_ns(ctrl, nsid); 3761 if (ns) { 3762 if (ns->disk && revalidate_disk(ns->disk)) 3763 nvme_ns_remove(ns); 3764 nvme_put_ns(ns); 3765 } else 3766 nvme_alloc_ns(ctrl, nsid); 3767 } 3768 3769 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3770 unsigned nsid) 3771 { 3772 struct nvme_ns *ns, *next; 3773 LIST_HEAD(rm_list); 3774 3775 down_write(&ctrl->namespaces_rwsem); 3776 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3777 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) 3778 list_move_tail(&ns->list, &rm_list); 3779 } 3780 up_write(&ctrl->namespaces_rwsem); 3781 3782 list_for_each_entry_safe(ns, next, &rm_list, list) 3783 nvme_ns_remove(ns); 3784 3785 } 3786 3787 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) 3788 { 3789 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); 3790 __le32 *ns_list; 3791 u32 prev = 0; 3792 int ret = 0, i; 3793 3794 if (nvme_ctrl_limited_cns(ctrl)) 3795 return -EOPNOTSUPP; 3796 3797 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 3798 if (!ns_list) 3799 return -ENOMEM; 3800 3801 for (;;) { 3802 ret = nvme_identify_ns_list(ctrl, prev, ns_list); 3803 if (ret) 3804 goto free; 3805 3806 for (i = 0; i < nr_entries; i++) { 3807 u32 nsid = le32_to_cpu(ns_list[i]); 3808 3809 if (!nsid) /* end of the list? 
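A zero NSID terminates the list, since valid namespace IDs start at 1.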
*/ 3810 goto out; 3811 nvme_validate_ns(ctrl, nsid); 3812 while (++prev < nsid) 3813 nvme_ns_remove_by_nsid(ctrl, prev); 3814 } 3815 } 3816 out: 3817 nvme_remove_invalid_namespaces(ctrl, prev); 3818 free: 3819 kfree(ns_list); 3820 return ret; 3821 } 3822 3823 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) 3824 { 3825 struct nvme_id_ctrl *id; 3826 u32 nn, i; 3827 3828 if (nvme_identify_ctrl(ctrl, &id)) 3829 return; 3830 nn = le32_to_cpu(id->nn); 3831 kfree(id); 3832 3833 for (i = 1; i <= nn; i++) 3834 nvme_validate_ns(ctrl, i); 3835 3836 nvme_remove_invalid_namespaces(ctrl, nn); 3837 } 3838 3839 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 3840 { 3841 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 3842 __le32 *log; 3843 int error; 3844 3845 log = kzalloc(log_size, GFP_KERNEL); 3846 if (!log) 3847 return; 3848 3849 /* 3850 * We need to read the log to clear the AEN, but we don't want to rely 3851 * on it for the changed namespace information as userspace could have 3852 * raced with us in reading the log page, which could cause us to miss 3853 * updates. 3854 */ 3855 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log, 3856 log_size, 0); 3857 if (error) 3858 dev_warn(ctrl->device, 3859 "reading changed ns log failed: %d\n", error); 3860 3861 kfree(log); 3862 } 3863 3864 static void nvme_scan_work(struct work_struct *work) 3865 { 3866 struct nvme_ctrl *ctrl = 3867 container_of(work, struct nvme_ctrl, scan_work); 3868 3869 /* No tagset on a live ctrl means IO queues could not be created */ 3870 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) 3871 return; 3872 3873 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 3874 dev_info(ctrl->device, "rescanning namespaces.\n"); 3875 nvme_clear_changed_ns_log(ctrl); 3876 } 3877 3878 mutex_lock(&ctrl->scan_lock); 3879 if (nvme_scan_ns_list(ctrl) != 0) 3880 nvme_scan_ns_sequential(ctrl); 3881 mutex_unlock(&ctrl->scan_lock); 3882 3883 down_write(&ctrl->namespaces_rwsem); 3884 list_sort(NULL, &ctrl->namespaces, ns_cmp); 3885 up_write(&ctrl->namespaces_rwsem); 3886 } 3887 3888 /* 3889 * This function iterates the namespace list unlocked to allow recovery from 3890 * controller failure. It is up to the caller to ensure the namespace list is 3891 * not modified by scan work while this function is executing. 3892 */ 3893 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 3894 { 3895 struct nvme_ns *ns, *next; 3896 LIST_HEAD(ns_list); 3897 3898 /* 3899 * make sure to requeue I/O to all namespaces as these 3900 * might result from the scan itself and must complete 3901 * for the scan_work to make progress 3902 */ 3903 nvme_mpath_clear_ctrl_paths(ctrl); 3904 3905 /* prevent racing with ns scanning */ 3906 flush_work(&ctrl->scan_work); 3907 3908 /* 3909 * The dead state indicates that the controller was not gracefully 3910 * disconnected. In that case, we won't be able to flush any data while 3911 * removing the namespaces' disks; fail all the queues now to avoid 3912 * potentially having to clean up the failed sync later.
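* nvme_kill_queues() marks the remaining queues dying and unquiesces them, so the disk teardown below cannot block on I/O that will never complete.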
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/*
	 * Make sure to requeue I/O to all namespaces: such I/O may result
	 * from the scan itself and must complete for scan_work to make
	 * progress.
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	down_write(&ctrl->namespaces_rwsem);
	list_splice_init(&ctrl->namespaces, &ns_list);
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &ns_list, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
	}
	return ret;
}

static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);
	ctrl->ops->submit_async_event(ctrl);
}

static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
			sizeof(*log), 0))
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
	kfree(log);
}

static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_try_sched_reset(ctrl);
			return;
		}
		msleep(100);
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);
}
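/*
 * Worked example of the AEN result decoding done below (a sketch based on
 * the NVMe completion dword layout: bits 2:0 event type, bits 15:8 event
 * information, bits 23:16 log page identifier):
 *
 *	result = 0x00040002
 *	aer_type        = result & 0x07          = NVME_AER_NOTICE
 *	aer_notice_type = (result & 0xff00) >> 8 = NVME_AER_NOTICE_NS_CHANGED
 *
 * i.e. a Namespace Attribute Changed notice pointing at the Changed
 * Namespace List log page, which makes nvme_handle_aen_notice() queue a
 * namespace rescan.
 */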
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = (result & 0xff00) >> 8;

	trace_nvme_async_event(ctrl, aer_notice_type);

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = result & 0x07;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		trace_nvme_async_event(ctrl, aer_type);
		ctrl->aen_result = result;
		break;
	default:
		break;
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	if (ctrl->kato)
		nvme_start_keep_alive(ctrl);

	nvme_enable_aen(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
	}
	ctrl->created = true;
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
	nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (subsys && ctrl->instance != subsys->instance)
		ida_simple_remove(&nvme_instance_ida, ctrl->instance);

	kfree(ctrl->effects);
	nvme_mpath_uninit(ctrl);
	__free_page(ctrl->discard_page);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}
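/*
 * Lifecycle sketch (illustrative only; "foo" is a hypothetical transport):
 * a transport driver pairs the helpers in this file roughly as follows,
 * with the queue setup/teardown details elided.
 *
 *	static int foo_probe(...)
 *	{
 *		ret = nvme_init_ctrl(&ctrl->ctrl, dev, &foo_ctrl_ops, quirks);
 *		if (ret)
 *			return ret;
 *		... set up admin and IO queues ...
 *		nvme_start_ctrl(&ctrl->ctrl);
 *		return 0;
 *	}
 *
 *	static void foo_remove(...)
 *	{
 *		nvme_stop_ctrl(&ctrl->ctrl);
 *		nvme_remove_namespaces(&ctrl->ctrl);
 *		... tear down queues ...
 *		nvme_uninit_ctrl(&ctrl->ctrl);
 *	}
 */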
/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));

	return 0;
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
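/*
 * Usage sketch (illustrative only; assumes a controller named nvme0 and
 * that the device supports APST, so the attribute is visible): the latency
 * tolerance initialized in nvme_init_ctrl() can be tuned per device from
 * userspace through the standard PM QoS sysfs interface, e.g.:
 *
 *	echo 25000 > /sys/class/nvme/nvme0/power/pm_qos_latency_tolerance_us
 *
 * which feeds back into the nvme_set_latency_tolerance() callback set
 * above to re-evaluate the APST power-state table.
 */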
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller whose namespace queues need to be ended
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);

	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
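/*
 * Usage sketch (illustrative only): the freeze helpers above are typically
 * paired by transports around a controller reset, so no new I/O enters the
 * block layer while queue geometry may change:
 *
 *	nvme_start_freeze(ctrl);
 *	nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT);
 *	... reset the controller and reprogram the IO queues ...
 *	nvme_unfreeze(ctrl);
 *
 * By contrast, nvme_stop_queues()/nvme_start_queues() only quiesce and
 * resume dispatch without draining, and nvme_sync_queues() waits for any
 * pending timeout work to settle.
 */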
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}

static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
	return 0;

destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_instance_ida);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);