/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static void nvme_ns_remove(struct nvme_ns *ns);
static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	/*
	 * Revalidating a dead namespace sets capacity to 0. This will end
	 * buffered writers dirtying pages that can't be synced.
	 */
	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;
	revalidate_disk(ns->disk);
	blk_set_queue_dying(ns->queue);
	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ns->queue);
}

static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		queue_work(nvme_wq, &ctrl->scan_work);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE &&
		    ctrl->state != NVME_CTRL_ADMIN_ONLY)
			ret = -ENETRESET;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret = 0;

	/*
	 * Keep a reference until the work is flushed since ->delete_ctrl
	 * can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	ret = nvme_delete_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->delete_work);
	nvme_put_ctrl(ctrl);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync);

static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

static blk_status_t nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	default:
		return BLK_STS_IOERR;
	}
}

static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

void nvme_complete_rq(struct request *req)
{
	blk_status_t status = nvme_error_status(req);

	trace_nvme_complete_rq(req);

	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
		if ((req->cmd_flags & REQ_NVME_MPATH) &&
		    blk_path_error(status)) {
			nvme_failover_req(req);
			return;
		}

		if (!blk_queue_dying(req->q)) {
			nvme_req(req)->retries++;
			blk_mq_requeue_request(req, true);
			return;
		}
	}
	blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
			    "Cancelling I/O %d", req->tag);

	nvme_req(req)->status = NVME_SC_ABORT_REQ;
	blk_mq_complete_request(req);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
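
/*
 * Validate and perform a controller state transition.  The legal
 * transitions are encoded in the nested switches below; the state is
 * only changed under ctrl->lock, and the return value tells the caller
 * whether the transition actually happened.
 */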
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_ADMIN_ONLY:
		switch (old_state) {
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_ADMIN_ONLY:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_ADMIN_ONLY:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	list_del_init(&head->entry);
	cleanup_srcu_struct_quiesced(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_clear_nvme_request(req);
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
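
/*
 * Enable or disable the Streams directive via a Directive Send command
 * with the Identify directive type, applied across all namespaces
 * (NVME_NSID_ALL).
 */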
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
		struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		return ret;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		nvme_disable_streams(ctrl);
		return 0;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
}
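
/*
 * Write hints originate in userspace, e.g. (illustrative only, assuming
 * a hint-aware application):
 *
 *	uint64_t hint = RWH_WRITE_LIFE_SHORT;
 *
 *	fcntl(fd, F_SET_RW_HINT, &hint);
 *
 * The block layer carries the hint in req->write_hint, which
 * nvme_assign_write_stream() below translates into a stream identifier.
 */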
/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_STS_RESOURCE;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}
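
/*
 * Build an NVMe read/write command from a block request.  Note that the
 * transfer length (NLB) is zero based in the wire format, hence the
 * "- 1" when converting from blk_rq_bytes() below.
 */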
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides
		 * a metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.
		 * Else we enable the PRACT bit for protection information or
		 * set the namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		} else if (req_op(req) == REQ_OP_WRITE) {
			t10_pi_prepare(req, ns->pi_type);
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    nvme_req(req)->status == 0) {
		struct nvme_ns *ns = req->rq_disk->private_data;

		t10_pi_complete(req, ns->pi_type,
				blk_rq_bytes(req) >> ns->lba_shift);
	}
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	nvme_clear_nvme_request(req);

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		/* currently only aliased to deallocate for a few ctrls: */
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
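
/*
 * A minimal usage sketch for the synchronous command helpers below
 * (illustrative only; nvme_identify_ctrl() further down is a real
 * caller):
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */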
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}
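
/*
 * Submit a passthrough command on behalf of userspace: map the user
 * data buffer (and an optional metadata buffer) into the request,
 * execute it synchronously, and copy metadata back to userspace for
 * successful non-write commands.
 */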
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		bio->bi_disk = disk;
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ret;
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
			status);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
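
/*
 * Fetch the Namespace Identification Descriptor list (Identify,
 * CNS 03h) and extract the EUI-64, NGUID and UUID descriptors, when
 * present, into *ids.
 */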
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
			NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ids->eui64, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ids->nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
			NVME_IDENTIFY_DATA_SIZE);
}

static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid)
{
	struct nvme_id_ns *id;
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NULL;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed\n");
		kfree(id);
		return NULL;
	}

	return id;
}

static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}
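
/*
 * Set Features/Number of Queues takes zero-based queue counts in cdw11
 * (NSQR in bits 15:0, NCQR in bits 31:16).  For example, asking for
 * *count = 4 encodes q_count as 0x00030003, and the controller returns
 * the allocated counts in the same format.
 */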
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}
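
/*
 * A passthrough command can change device state behind the driver's
 * back.  Freeze IO across all namespaces before issuing it whenever the
 * reported (or assumed) command effects say that logical block contents
 * or the command set could change.
 */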
static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
								u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ctrl->effects)
			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
		if (effects & ~NVME_CMD_EFFECTS_CSUPP)
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	else
		effects = nvme_known_admin_effects(opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (ns->disk && nvme_revalidate_disk(ns->disk))
			nvme_set_queue_dying(ns);
	up_read(&ctrl->namespaces_rwsem);

	nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	/*
	 * Revalidate LBA changes prior to unfreezing. This is necessary to
	 * prevent memory corruption if a logical block size was changed by
	 * this command.
	 */
	if (effects & NVME_CMD_EFFECTS_LBCC)
		nvme_update_formats(ctrl);
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
		nvme_unfreeze(ctrl);
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
		nvme_queue_scan(ctrl);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
			0, &cmd.result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}
/*
 * Issue ioctl requests on the first available path.  Note that unlike normal
 * block layer requests we will not retry a failed request on another
 * controller.
 */
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
	if (disk->fops == &nvme_ns_head_ops) {
		*head = disk->private_data;
		*srcu_idx = srcu_read_lock(&(*head)->srcu);
		return nvme_find_path(*head);
	}
#endif
	*head = NULL;
	*srcu_idx = -1;
	return disk->private_data;
}

static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
	if (head)
		srcu_read_unlock(&head->srcu, idx);
}

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
#ifdef CONFIG_NVM
		if (ns->ndev)
			return nvme_nvm_ioctl(ns, cmd, arg);
#endif
		if (is_sed_ioctl(cmd))
			return sed_ioctl(ns->ctrl->opal_dev, cmd,
					 (void __user *) arg);
		return -ENOTTY;
	}
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		ret = -EWOULDBLOCK;
	else
		ret = nvme_ns_ioctl(ns, cmd, arg);
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

#ifdef CONFIG_NVME_MULTIPATH
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(ns->head->disk))
		goto fail;
#endif
	if (!kref_get_unless_zero(&ns->kref))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
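
/*
 * Register a T10 protection information profile with the block layer,
 * so the integrity framework generates and verifies guard/reference
 * tags for the formats this driver understands.
 */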
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, 1);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_set_chunk_size(struct nvme_ns *ns)
{
	u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = ns->queue;
	u32 size = queue_logical_block_size(queue);

	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, UINT_MAX);
	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
	memset(ids, 0, sizeof(*ids));

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
	if (ctrl->vs >= NVME_VS(1, 3, 0)) {
		/*
		 * Don't treat an error as fatal, as we potentially already
		 * have an NGUID or EUI-64.
		 */
		if (nvme_identify_ns_descs(ctrl, nsid, ids))
			dev_warn(ctrl->device,
				 "%s: Identify Descriptors failed\n", __func__);
	}
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
}

static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
	unsigned short bs = 1 << ns->lba_shift;

	blk_mq_freeze_queue(disk->queue);
	blk_integrity_unregister(disk);

	blk_queue_logical_block_size(disk->queue, bs);
	blk_queue_physical_block_size(disk->queue, bs);
	blk_queue_io_min(disk->queue, bs);

	if (ns->ms && !ns->ext &&
	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		nvme_init_integrity(disk, ns->ms, ns->pi_type);
	if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
		capacity = 0;

	set_capacity(disk, capacity);
	nvme_config_discard(ns);

	if (id->nsattr & (1 << 0))
		set_disk_ro(disk, true);
	else
		set_disk_ro(disk, false);

	blk_mq_unfreeze_queue(disk->queue);
}
static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;

	/*
	 * If Identify Namespace failed, use the default 512 byte block size
	 * so the block layer can use it before failing reads/writes for 0
	 * capacity.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	ns->noiob = le16_to_cpu(id->noiob);
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
	/* the PI implementation requires metadata equal t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	if (ns->noiob)
		nvme_set_chunk_size(ns);
	nvme_update_disk_info(disk, ns, id);
	if (ns->ndev)
		nvme_nvm_update_nvm_info(ns);
#ifdef CONFIG_NVME_MULTIPATH
	if (ns->head->disk)
		nvme_update_disk_info(ns->head->disk, ns, id);
#endif
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_id_ns *id;
	struct nvme_ns_ids ids;
	int ret = 0;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	id = nvme_identify_ns(ctrl, ns->head->ns_id);
	if (!id)
		return -ENODEV;

	if (id->ncap == 0) {
		ret = -ENODEV;
		goto out;
	}

	__nvme_revalidate_disk(disk, id);
	nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
		dev_err(ctrl->device,
			"identifiers changed for nsid %d\n", ns->head->ns_id);
		ret = -ENODEV;
	}

out:
	kfree(id);
	return ret;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
	struct nvme_command c;
	int srcu_idx, ret;
	u8 data[16] = { 0, };

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}
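
/*
 * The persistent reservation helpers below map the block layer pr_ops
 * onto NVMe reservation commands.  cdw10 carries the action in bits
 * 2:0, IEKEY in bit 3, the reservation type in bits 15:8 where
 * applicable and, for register, PTPL control in bits 31:30.
 */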
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw10[1] = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk = nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;

	if (!kref_get_unless_zero(&head->ref))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.getgeo		= nvme_getgeo,
	.pr_ops		= &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */
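
/*
 * Poll CSTS.RDY until it matches the expected state of the enable bit.
 * CAP.TO is in units of 500ms, hence the (NVME_CAP_TIMEOUT(cap) + 1) *
 * HZ / 2 deadline; a CSTS of all ones means the device is gone.
 */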
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
	int ret;

	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
		return 0;

	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
			NULL);
	if (ret)
		dev_warn_once(ctrl->device,
			"could not set timestamp (%d)\n", ret);
	return ret;
}
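
/*
 * Worked example for the 2% heuristic in nvme_configure_apst() below: a
 * state with enlat + exlat = 1000us gets an idle timeout of
 * 50 * 1000us = 50ms, since transition_ms = (1000 + 19) / 20 = 50.
 */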
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's exit latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */

	unsigned apste;
	struct nvme_feat_auto_pst *table;
	u64 max_lat_us = 0;
	int max_ps = -1;
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return 0;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return 0;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return 0;

	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		apste = 0;
		dev_dbg(ctrl->device, "APST disabled\n");
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
			u64 total_latency_us, exit_latency_us, transition_ms;

			if (target)
				table->entries[state] = target;

			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
				continue;

			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

			exit_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)
				continue;

			total_latency_us =
				exit_latency_us +
				le32_to_cpu(ctrl->psd[state].entry_lat);

			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));

			if (max_ps == -1)
				max_ps = state;

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;
		}

		apste = 1;

		if (max_ps == -1) {
			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		} else {
			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
				max_ps, max_lat_us, (int)sizeof(*table), table);
		}
	}

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
	return ret;
}

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;

	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}

struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
	{
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
		.quirks = NVME_QUIRK_NO_APST,
	}
};
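
/*
 * For example, the Toshiba entry above matches an Identify controller
 * model string of "THNSF5256GPUK TOSHIBA" followed only by space
 * padding up to the end of the 40-byte field.
 */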
*/ 2043 static bool string_matches(const char *idstr, const char *match, size_t len) 2044 { 2045 size_t matchlen; 2046 2047 if (!match) 2048 return true; 2049 2050 matchlen = strlen(match); 2051 WARN_ON_ONCE(matchlen > len); 2052 2053 if (memcmp(idstr, match, matchlen)) 2054 return false; 2055 2056 for (; matchlen < len; matchlen++) 2057 if (idstr[matchlen] != ' ') 2058 return false; 2059 2060 return true; 2061 } 2062 2063 static bool quirk_matches(const struct nvme_id_ctrl *id, 2064 const struct nvme_core_quirk_entry *q) 2065 { 2066 return q->vid == le16_to_cpu(id->vid) && 2067 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2068 string_matches(id->fr, q->fr, sizeof(id->fr)); 2069 } 2070 2071 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2072 struct nvme_id_ctrl *id) 2073 { 2074 size_t nqnlen; 2075 int off; 2076 2077 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2078 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2079 strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2080 return; 2081 } 2082 2083 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2084 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2085 2086 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2087 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2088 "nqn.2014.08.org.nvmexpress:%04x%04x", 2089 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2090 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2091 off += sizeof(id->sn); 2092 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2093 off += sizeof(id->mn); 2094 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2095 } 2096 2097 static void __nvme_release_subsystem(struct nvme_subsystem *subsys) 2098 { 2099 ida_simple_remove(&nvme_subsystems_ida, subsys->instance); 2100 kfree(subsys); 2101 } 2102 2103 static void nvme_release_subsystem(struct device *dev) 2104 { 2105 __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev)); 2106 } 2107 2108 static void nvme_destroy_subsystem(struct kref *ref) 2109 { 2110 struct nvme_subsystem *subsys = 2111 container_of(ref, struct nvme_subsystem, ref); 2112 2113 mutex_lock(&nvme_subsystems_lock); 2114 list_del(&subsys->entry); 2115 mutex_unlock(&nvme_subsystems_lock); 2116 2117 ida_destroy(&subsys->ns_ida); 2118 device_del(&subsys->dev); 2119 put_device(&subsys->dev); 2120 } 2121 2122 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2123 { 2124 kref_put(&subsys->ref, nvme_destroy_subsystem); 2125 } 2126 2127 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2128 { 2129 struct nvme_subsystem *subsys; 2130 2131 lockdep_assert_held(&nvme_subsystems_lock); 2132 2133 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2134 if (strcmp(subsys->subnqn, subsysnqn)) 2135 continue; 2136 if (!kref_get_unless_zero(&subsys->ref)) 2137 continue; 2138 return subsys; 2139 } 2140 2141 return NULL; 2142 } 2143 2144 #define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2145 struct device_attribute subsys_attr_##_name = \ 2146 __ATTR(_name, _mode, _show, NULL) 2147 2148 static ssize_t nvme_subsys_show_nqn(struct device *dev, 2149 struct device_attribute *attr, 2150 char *buf) 2151 { 2152 struct nvme_subsystem *subsys = 2153 container_of(dev, struct nvme_subsystem, dev); 2154 2155 return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); 2156 } 2157 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2158 2159 #define nvme_subsys_show_str_function(field) \ 2160 static ssize_t subsys_##field##_show(struct device *dev, \ 2161 struct
device_attribute *attr, char *buf) \ 2162 { \ 2163 struct nvme_subsystem *subsys = \ 2164 container_of(dev, struct nvme_subsystem, dev); \ 2165 return sprintf(buf, "%.*s\n", \ 2166 (int)sizeof(subsys->field), subsys->field); \ 2167 } \ 2168 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2169 2170 nvme_subsys_show_str_function(model); 2171 nvme_subsys_show_str_function(serial); 2172 nvme_subsys_show_str_function(firmware_rev); 2173 2174 static struct attribute *nvme_subsys_attrs[] = { 2175 &subsys_attr_model.attr, 2176 &subsys_attr_serial.attr, 2177 &subsys_attr_firmware_rev.attr, 2178 &subsys_attr_subsysnqn.attr, 2179 NULL, 2180 }; 2181 2182 static struct attribute_group nvme_subsys_attrs_group = { 2183 .attrs = nvme_subsys_attrs, 2184 }; 2185 2186 static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2187 &nvme_subsys_attrs_group, 2188 NULL, 2189 }; 2190 2191 static int nvme_active_ctrls(struct nvme_subsystem *subsys) 2192 { 2193 int count = 0; 2194 struct nvme_ctrl *ctrl; 2195 2196 mutex_lock(&subsys->lock); 2197 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { 2198 if (ctrl->state != NVME_CTRL_DELETING && 2199 ctrl->state != NVME_CTRL_DEAD) 2200 count++; 2201 } 2202 mutex_unlock(&subsys->lock); 2203 2204 return count; 2205 } 2206 2207 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2208 { 2209 struct nvme_subsystem *subsys, *found; 2210 int ret; 2211 2212 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2213 if (!subsys) 2214 return -ENOMEM; 2215 ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL); 2216 if (ret < 0) { 2217 kfree(subsys); 2218 return ret; 2219 } 2220 subsys->instance = ret; 2221 mutex_init(&subsys->lock); 2222 kref_init(&subsys->ref); 2223 INIT_LIST_HEAD(&subsys->ctrls); 2224 INIT_LIST_HEAD(&subsys->nsheads); 2225 nvme_init_subnqn(subsys, ctrl, id); 2226 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2227 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2228 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); 2229 subsys->vendor_id = le16_to_cpu(id->vid); 2230 subsys->cmic = id->cmic; 2231 2232 subsys->dev.class = nvme_subsys_class; 2233 subsys->dev.release = nvme_release_subsystem; 2234 subsys->dev.groups = nvme_subsys_attrs_groups; 2235 dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance); 2236 device_initialize(&subsys->dev); 2237 2238 mutex_lock(&nvme_subsystems_lock); 2239 found = __nvme_find_get_subsystem(subsys->subnqn); 2240 if (found) { 2241 /* 2242 * Verify that the subsystem actually supports multiple 2243 * controllers, else bail out. 
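* CMIC bit 1 is the spec's "may contain two or more controllers"
* capability: a controller reporting e.g. id->cmic == 0x3 passes the
* (id->cmic & (1 << 1)) test below, while discovery controllers and
* subsystems with no other active controllers are exempt from it.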
2244 */ 2245 if (!(ctrl->opts && ctrl->opts->discovery_nqn) && 2246 nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) { 2247 dev_err(ctrl->device, 2248 "ignoring ctrl due to duplicate subnqn (%s).\n", 2249 found->subnqn); 2250 nvme_put_subsystem(found); 2251 ret = -EINVAL; 2252 goto out_unlock; 2253 } 2254 2255 __nvme_release_subsystem(subsys); 2256 subsys = found; 2257 } else { 2258 ret = device_add(&subsys->dev); 2259 if (ret) { 2260 dev_err(ctrl->device, 2261 "failed to register subsystem device.\n"); 2262 goto out_unlock; 2263 } 2264 ida_init(&subsys->ns_ida); 2265 list_add_tail(&subsys->entry, &nvme_subsystems); 2266 } 2267 2268 ctrl->subsys = subsys; 2269 mutex_unlock(&nvme_subsystems_lock); 2270 2271 if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2272 dev_name(ctrl->device))) { 2273 dev_err(ctrl->device, 2274 "failed to create sysfs link from subsystem.\n"); 2275 /* the transport driver will eventually put the subsystem */ 2276 return -EINVAL; 2277 } 2278 2279 mutex_lock(&subsys->lock); 2280 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2281 mutex_unlock(&subsys->lock); 2282 2283 return 0; 2284 2285 out_unlock: 2286 mutex_unlock(&nvme_subsystems_lock); 2287 put_device(&subsys->dev); 2288 return ret; 2289 } 2290 2291 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, 2292 void *log, size_t size, u64 offset) 2293 { 2294 struct nvme_command c = { }; 2295 unsigned long dwlen = size / 4 - 1; 2296 2297 c.get_log_page.opcode = nvme_admin_get_log_page; 2298 c.get_log_page.nsid = cpu_to_le32(nsid); 2299 c.get_log_page.lid = log_page; 2300 c.get_log_page.lsp = lsp; 2301 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2302 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2303 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2304 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2305 2306 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2307 } 2308 2309 static int nvme_get_effects_log(struct nvme_ctrl *ctrl) 2310 { 2311 int ret; 2312 2313 if (!ctrl->effects) 2314 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); 2315 2316 if (!ctrl->effects) 2317 return 0; 2318 2319 ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, 2320 ctrl->effects, sizeof(*ctrl->effects), 0); 2321 if (ret) { 2322 kfree(ctrl->effects); 2323 ctrl->effects = NULL; 2324 } 2325 return ret; 2326 } 2327 2328 /* 2329 * Initialize the cached copies of the Identify data and various controller 2330 * registers in our nvme_ctrl structure. This should be called as soon as 2331 * the admin queue is fully up and running.
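*
* Worked MDTS example (hypothetical values): with MPSMIN = 0 the
* controller page size is 4KiB and page_shift is 12, so id->mdts = 5
* limits transfers to 2^5 minimum-size pages = 128KiB, which the
* max_hw_sectors computation below expresses as
* 1 << (5 + 12 - 9) = 256 512-byte sectors.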
2332 */ 2333 int nvme_init_identify(struct nvme_ctrl *ctrl) 2334 { 2335 struct nvme_id_ctrl *id; 2336 u64 cap; 2337 int ret, page_shift; 2338 u32 max_hw_sectors; 2339 bool prev_apst_enabled; 2340 2341 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 2342 if (ret) { 2343 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 2344 return ret; 2345 } 2346 2347 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap); 2348 if (ret) { 2349 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2350 return ret; 2351 } 2352 page_shift = NVME_CAP_MPSMIN(cap) + 12; 2353 2354 if (ctrl->vs >= NVME_VS(1, 1, 0)) 2355 ctrl->subsystem = NVME_CAP_NSSRC(cap); 2356 2357 ret = nvme_identify_ctrl(ctrl, &id); 2358 if (ret) { 2359 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 2360 return -EIO; 2361 } 2362 2363 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 2364 ret = nvme_get_effects_log(ctrl); 2365 if (ret < 0) 2366 goto out_free; 2367 } 2368 2369 if (!ctrl->identified) { 2370 int i; 2371 2372 ret = nvme_init_subsystem(ctrl, id); 2373 if (ret) 2374 goto out_free; 2375 2376 /* 2377 * Check for quirks. Quirk can depend on firmware version, 2378 * so, in principle, the set of quirks present can change 2379 * across a reset. As a possible future enhancement, we 2380 * could re-scan for quirks every time we reinitialize 2381 * the device, but we'd have to make sure that the driver 2382 * behaves intelligently if the quirks change. 2383 */ 2384 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 2385 if (quirk_matches(id, &core_quirks[i])) 2386 ctrl->quirks |= core_quirks[i].quirks; 2387 } 2388 } 2389 2390 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 2391 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 2392 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 2393 } 2394 2395 ctrl->oacs = le16_to_cpu(id->oacs); 2396 ctrl->oncs = le16_to_cpup(&id->oncs); 2397 ctrl->oaes = le32_to_cpu(id->oaes); 2398 atomic_set(&ctrl->abort_limit, id->acl + 1); 2399 ctrl->vwc = id->vwc; 2400 ctrl->cntlid = le16_to_cpup(&id->cntlid); 2401 if (id->mdts) 2402 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2403 else 2404 max_hw_sectors = UINT_MAX; 2405 ctrl->max_hw_sectors = 2406 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 2407 2408 nvme_set_queue_limits(ctrl, ctrl->admin_q); 2409 ctrl->sgls = le32_to_cpu(id->sgls); 2410 ctrl->kas = le16_to_cpu(id->kas); 2411 ctrl->max_namespaces = le32_to_cpu(id->mnan); 2412 2413 if (id->rtd3e) { 2414 /* us -> s */ 2415 u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000; 2416 2417 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 2418 shutdown_timeout, 60); 2419 2420 if (ctrl->shutdown_timeout != shutdown_timeout) 2421 dev_info(ctrl->device, 2422 "Shutdown timeout set to %u seconds\n", 2423 ctrl->shutdown_timeout); 2424 } else 2425 ctrl->shutdown_timeout = shutdown_timeout; 2426 2427 ctrl->npss = id->npss; 2428 ctrl->apsta = id->apsta; 2429 prev_apst_enabled = ctrl->apst_enabled; 2430 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 2431 if (force_apst && id->apsta) { 2432 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 2433 ctrl->apst_enabled = true; 2434 } else { 2435 ctrl->apst_enabled = false; 2436 } 2437 } else { 2438 ctrl->apst_enabled = id->apsta; 2439 } 2440 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 2441 2442 if (ctrl->ops->flags & NVME_F_FABRICS) { 2443 ctrl->icdoff = le16_to_cpu(id->icdoff); 2444 ctrl->ioccsz = 
le32_to_cpu(id->ioccsz); 2445 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 2446 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 2447 2448 /* 2449 * In fabrics we need to verify the cntlid matches the 2450 * admin connect 2451 */ 2452 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 2453 ret = -EINVAL; 2454 goto out_free; 2455 } 2456 2457 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { 2458 dev_err(ctrl->device, 2459 "keep-alive support is mandatory for fabrics\n"); 2460 ret = -EINVAL; 2461 goto out_free; 2462 } 2463 } else { 2464 ctrl->cntlid = le16_to_cpu(id->cntlid); 2465 ctrl->hmpre = le32_to_cpu(id->hmpre); 2466 ctrl->hmmin = le32_to_cpu(id->hmmin); 2467 ctrl->hmminds = le32_to_cpu(id->hmminds); 2468 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 2469 } 2470 2471 ret = nvme_mpath_init(ctrl, id); 2472 kfree(id); 2473 2474 if (ret < 0) 2475 return ret; 2476 2477 if (ctrl->apst_enabled && !prev_apst_enabled) 2478 dev_pm_qos_expose_latency_tolerance(ctrl->device); 2479 else if (!ctrl->apst_enabled && prev_apst_enabled) 2480 dev_pm_qos_hide_latency_tolerance(ctrl->device); 2481 2482 ret = nvme_configure_apst(ctrl); 2483 if (ret < 0) 2484 return ret; 2485 2486 ret = nvme_configure_timestamp(ctrl); 2487 if (ret < 0) 2488 return ret; 2489 2490 ret = nvme_configure_directives(ctrl); 2491 if (ret < 0) 2492 return ret; 2493 2494 ctrl->identified = true; 2495 2496 return 0; 2497 2498 out_free: 2499 kfree(id); 2500 return ret; 2501 } 2502 EXPORT_SYMBOL_GPL(nvme_init_identify); 2503 2504 static int nvme_dev_open(struct inode *inode, struct file *file) 2505 { 2506 struct nvme_ctrl *ctrl = 2507 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 2508 2509 switch (ctrl->state) { 2510 case NVME_CTRL_LIVE: 2511 case NVME_CTRL_ADMIN_ONLY: 2512 break; 2513 default: 2514 return -EWOULDBLOCK; 2515 } 2516 2517 file->private_data = ctrl; 2518 return 0; 2519 } 2520 2521 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) 2522 { 2523 struct nvme_ns *ns; 2524 int ret; 2525 2526 down_read(&ctrl->namespaces_rwsem); 2527 if (list_empty(&ctrl->namespaces)) { 2528 ret = -ENOTTY; 2529 goto out_unlock; 2530 } 2531 2532 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); 2533 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { 2534 dev_warn(ctrl->device, 2535 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); 2536 ret = -EINVAL; 2537 goto out_unlock; 2538 } 2539 2540 dev_warn(ctrl->device, 2541 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); 2542 kref_get(&ns->kref); 2543 up_read(&ctrl->namespaces_rwsem); 2544 2545 ret = nvme_user_cmd(ctrl, ns, argp); 2546 nvme_put_ns(ns); 2547 return ret; 2548 2549 out_unlock: 2550 up_read(&ctrl->namespaces_rwsem); 2551 return ret; 2552 } 2553 2554 static long nvme_dev_ioctl(struct file *file, unsigned int cmd, 2555 unsigned long arg) 2556 { 2557 struct nvme_ctrl *ctrl = file->private_data; 2558 void __user *argp = (void __user *)arg; 2559 2560 switch (cmd) { 2561 case NVME_IOCTL_ADMIN_CMD: 2562 return nvme_user_cmd(ctrl, NULL, argp); 2563 case NVME_IOCTL_IO_CMD: 2564 return nvme_dev_user_cmd(ctrl, argp); 2565 case NVME_IOCTL_RESET: 2566 dev_warn(ctrl->device, "resetting controller\n"); 2567 return nvme_reset_ctrl_sync(ctrl); 2568 case NVME_IOCTL_SUBSYS_RESET: 2569 return nvme_reset_subsystem(ctrl); 2570 case NVME_IOCTL_RESCAN: 2571 nvme_queue_scan(ctrl); 2572 return 0; 2573 default: 2574 return -ENOTTY; 2575 } 2576 } 2577 2578 static const struct file_operations nvme_dev_fops = { 2579 .owner = THIS_MODULE, 2580 .open 
= nvme_dev_open, 2581 .unlocked_ioctl = nvme_dev_ioctl, 2582 .compat_ioctl = nvme_dev_ioctl, 2583 }; 2584 2585 static ssize_t nvme_sysfs_reset(struct device *dev, 2586 struct device_attribute *attr, const char *buf, 2587 size_t count) 2588 { 2589 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2590 int ret; 2591 2592 ret = nvme_reset_ctrl_sync(ctrl); 2593 if (ret < 0) 2594 return ret; 2595 return count; 2596 } 2597 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 2598 2599 static ssize_t nvme_sysfs_rescan(struct device *dev, 2600 struct device_attribute *attr, const char *buf, 2601 size_t count) 2602 { 2603 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2604 2605 nvme_queue_scan(ctrl); 2606 return count; 2607 } 2608 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 2609 2610 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 2611 { 2612 struct gendisk *disk = dev_to_disk(dev); 2613 2614 if (disk->fops == &nvme_fops) 2615 return nvme_get_ns_from_dev(dev)->head; 2616 else 2617 return disk->private_data; 2618 } 2619 2620 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 2621 char *buf) 2622 { 2623 struct nvme_ns_head *head = dev_to_ns_head(dev); 2624 struct nvme_ns_ids *ids = &head->ids; 2625 struct nvme_subsystem *subsys = head->subsys; 2626 int serial_len = sizeof(subsys->serial); 2627 int model_len = sizeof(subsys->model); 2628 2629 if (!uuid_is_null(&ids->uuid)) 2630 return sprintf(buf, "uuid.%pU\n", &ids->uuid); 2631 2632 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2633 return sprintf(buf, "eui.%16phN\n", ids->nguid); 2634 2635 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 2636 return sprintf(buf, "eui.%8phN\n", ids->eui64); 2637 2638 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 2639 subsys->serial[serial_len - 1] == '\0')) 2640 serial_len--; 2641 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 2642 subsys->model[model_len - 1] == '\0')) 2643 model_len--; 2644 2645 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 2646 serial_len, subsys->serial, model_len, subsys->model, 2647 head->ns_id); 2648 } 2649 static DEVICE_ATTR_RO(wwid); 2650 2651 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 2652 char *buf) 2653 { 2654 return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 2655 } 2656 static DEVICE_ATTR_RO(nguid); 2657 2658 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 2659 char *buf) 2660 { 2661 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 2662 2663 /* For backward compatibility expose the NGUID to userspace if 2664 * we have no UUID set 2665 */ 2666 if (uuid_is_null(&ids->uuid)) { 2667 printk_ratelimited(KERN_WARNING 2668 "No UUID available providing old NGUID\n"); 2669 return sprintf(buf, "%pU\n", ids->nguid); 2670 } 2671 return sprintf(buf, "%pU\n", &ids->uuid); 2672 } 2673 static DEVICE_ATTR_RO(uuid); 2674 2675 static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 2676 char *buf) 2677 { 2678 return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 2679 } 2680 static DEVICE_ATTR_RO(eui); 2681 2682 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 2683 char *buf) 2684 { 2685 return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id); 2686 } 2687 static DEVICE_ATTR_RO(nsid); 2688 2689 static struct attribute *nvme_ns_id_attrs[] = { 2690 &dev_attr_wwid.attr, 2691 &dev_attr_uuid.attr, 2692 &dev_attr_nguid.attr, 
2693 &dev_attr_eui.attr, 2694 &dev_attr_nsid.attr, 2695 #ifdef CONFIG_NVME_MULTIPATH 2696 &dev_attr_ana_grpid.attr, 2697 &dev_attr_ana_state.attr, 2698 #endif 2699 NULL, 2700 }; 2701 2702 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 2703 struct attribute *a, int n) 2704 { 2705 struct device *dev = container_of(kobj, struct device, kobj); 2706 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 2707 2708 if (a == &dev_attr_uuid.attr) { 2709 if (uuid_is_null(&ids->uuid) && 2710 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2711 return 0; 2712 } 2713 if (a == &dev_attr_nguid.attr) { 2714 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2715 return 0; 2716 } 2717 if (a == &dev_attr_eui.attr) { 2718 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 2719 return 0; 2720 } 2721 #ifdef CONFIG_NVME_MULTIPATH 2722 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { 2723 if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */ 2724 return 0; 2725 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) 2726 return 0; 2727 } 2728 #endif 2729 return a->mode; 2730 } 2731 2732 const struct attribute_group nvme_ns_id_attr_group = { 2733 .attrs = nvme_ns_id_attrs, 2734 .is_visible = nvme_ns_id_attrs_are_visible, 2735 }; 2736 2737 #define nvme_show_str_function(field) \ 2738 static ssize_t field##_show(struct device *dev, \ 2739 struct device_attribute *attr, char *buf) \ 2740 { \ 2741 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 2742 return sprintf(buf, "%.*s\n", \ 2743 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 2744 } \ 2745 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 2746 2747 nvme_show_str_function(model); 2748 nvme_show_str_function(serial); 2749 nvme_show_str_function(firmware_rev); 2750 2751 #define nvme_show_int_function(field) \ 2752 static ssize_t field##_show(struct device *dev, \ 2753 struct device_attribute *attr, char *buf) \ 2754 { \ 2755 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 2756 return sprintf(buf, "%d\n", ctrl->field); \ 2757 } \ 2758 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 2759 2760 nvme_show_int_function(cntlid); 2761 2762 static ssize_t nvme_sysfs_delete(struct device *dev, 2763 struct device_attribute *attr, const char *buf, 2764 size_t count) 2765 { 2766 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2767 2768 if (device_remove_file_self(dev, attr)) 2769 nvme_delete_ctrl_sync(ctrl); 2770 return count; 2771 } 2772 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 2773 2774 static ssize_t nvme_sysfs_show_transport(struct device *dev, 2775 struct device_attribute *attr, 2776 char *buf) 2777 { 2778 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2779 2780 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); 2781 } 2782 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 2783 2784 static ssize_t nvme_sysfs_show_state(struct device *dev, 2785 struct device_attribute *attr, 2786 char *buf) 2787 { 2788 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2789 static const char *const state_name[] = { 2790 [NVME_CTRL_NEW] = "new", 2791 [NVME_CTRL_LIVE] = "live", 2792 [NVME_CTRL_ADMIN_ONLY] = "only-admin", 2793 [NVME_CTRL_RESETTING] = "resetting", 2794 [NVME_CTRL_CONNECTING] = "connecting", 2795 [NVME_CTRL_DELETING] = "deleting", 2796 [NVME_CTRL_DEAD] = "dead", 2797 }; 2798 2799 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 2800 state_name[ctrl->state]) 2801 return sprintf(buf, "%s\n", state_name[ctrl->state]); 2802 2803 return 
sprintf(buf, "unknown state\n"); 2804 } 2805 2806 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 2807 2808 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 2809 struct device_attribute *attr, 2810 char *buf) 2811 { 2812 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2813 2814 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); 2815 } 2816 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 2817 2818 static ssize_t nvme_sysfs_show_address(struct device *dev, 2819 struct device_attribute *attr, 2820 char *buf) 2821 { 2822 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2823 2824 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 2825 } 2826 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 2827 2828 static struct attribute *nvme_dev_attrs[] = { 2829 &dev_attr_reset_controller.attr, 2830 &dev_attr_rescan_controller.attr, 2831 &dev_attr_model.attr, 2832 &dev_attr_serial.attr, 2833 &dev_attr_firmware_rev.attr, 2834 &dev_attr_cntlid.attr, 2835 &dev_attr_delete_controller.attr, 2836 &dev_attr_transport.attr, 2837 &dev_attr_subsysnqn.attr, 2838 &dev_attr_address.attr, 2839 &dev_attr_state.attr, 2840 NULL 2841 }; 2842 2843 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 2844 struct attribute *a, int n) 2845 { 2846 struct device *dev = container_of(kobj, struct device, kobj); 2847 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2848 2849 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 2850 return 0; 2851 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 2852 return 0; 2853 2854 return a->mode; 2855 } 2856 2857 static struct attribute_group nvme_dev_attrs_group = { 2858 .attrs = nvme_dev_attrs, 2859 .is_visible = nvme_dev_attrs_are_visible, 2860 }; 2861 2862 static const struct attribute_group *nvme_dev_attr_groups[] = { 2863 &nvme_dev_attrs_group, 2864 NULL, 2865 }; 2866 2867 static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys, 2868 unsigned nsid) 2869 { 2870 struct nvme_ns_head *h; 2871 2872 lockdep_assert_held(&subsys->lock); 2873 2874 list_for_each_entry(h, &subsys->nsheads, entry) { 2875 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref)) 2876 return h; 2877 } 2878 2879 return NULL; 2880 } 2881 2882 static int __nvme_check_ids(struct nvme_subsystem *subsys, 2883 struct nvme_ns_head *new) 2884 { 2885 struct nvme_ns_head *h; 2886 2887 lockdep_assert_held(&subsys->lock); 2888 2889 list_for_each_entry(h, &subsys->nsheads, entry) { 2890 if (nvme_ns_ids_valid(&new->ids) && 2891 !list_empty(&h->list) && 2892 nvme_ns_ids_equal(&new->ids, &h->ids)) 2893 return -EINVAL; 2894 } 2895 2896 return 0; 2897 } 2898 2899 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 2900 unsigned nsid, struct nvme_id_ns *id) 2901 { 2902 struct nvme_ns_head *head; 2903 int ret = -ENOMEM; 2904 2905 head = kzalloc(sizeof(*head), GFP_KERNEL); 2906 if (!head) 2907 goto out; 2908 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); 2909 if (ret < 0) 2910 goto out_free_head; 2911 head->instance = ret; 2912 INIT_LIST_HEAD(&head->list); 2913 ret = init_srcu_struct(&head->srcu); 2914 if (ret) 2915 goto out_ida_remove; 2916 head->subsys = ctrl->subsys; 2917 head->ns_id = nsid; 2918 kref_init(&head->ref); 2919 2920 nvme_report_ns_ids(ctrl, nsid, id, &head->ids); 2921 2922 ret = __nvme_check_ids(ctrl->subsys, head); 2923 if (ret) { 2924 dev_err(ctrl->device, 2925 "duplicate IDs for nsid %d\n", nsid); 2926 goto out_cleanup_srcu; 2927 } 2928 2929 ret = 
nvme_mpath_alloc_disk(ctrl, head); 2930 if (ret) 2931 goto out_cleanup_srcu; 2932 2933 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 2934 2935 kref_get(&ctrl->subsys->ref); 2936 2937 return head; 2938 out_cleanup_srcu: 2939 cleanup_srcu_struct(&head->srcu); 2940 out_ida_remove: 2941 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); 2942 out_free_head: 2943 kfree(head); 2944 out: 2945 return ERR_PTR(ret); 2946 } 2947 2948 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, 2949 struct nvme_id_ns *id) 2950 { 2951 struct nvme_ctrl *ctrl = ns->ctrl; 2952 bool is_shared = id->nmic & (1 << 0); 2953 struct nvme_ns_head *head = NULL; 2954 int ret = 0; 2955 2956 mutex_lock(&ctrl->subsys->lock); 2957 if (is_shared) 2958 head = __nvme_find_ns_head(ctrl->subsys, nsid); 2959 if (!head) { 2960 head = nvme_alloc_ns_head(ctrl, nsid, id); 2961 if (IS_ERR(head)) { 2962 ret = PTR_ERR(head); 2963 goto out_unlock; 2964 } 2965 } else { 2966 struct nvme_ns_ids ids; 2967 2968 nvme_report_ns_ids(ctrl, nsid, id, &ids); 2969 if (!nvme_ns_ids_equal(&head->ids, &ids)) { 2970 dev_err(ctrl->device, 2971 "IDs don't match for shared namespace %d\n", 2972 nsid); 2973 ret = -EINVAL; 2974 goto out_unlock; 2975 } 2976 } 2977 2978 list_add_tail(&ns->siblings, &head->list); 2979 ns->head = head; 2980 2981 out_unlock: 2982 mutex_unlock(&ctrl->subsys->lock); 2983 return ret; 2984 } 2985 2986 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) 2987 { 2988 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); 2989 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); 2990 2991 return nsa->head->ns_id - nsb->head->ns_id; 2992 } 2993 2994 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 2995 { 2996 struct nvme_ns *ns, *ret = NULL; 2997 2998 down_read(&ctrl->namespaces_rwsem); 2999 list_for_each_entry(ns, &ctrl->namespaces, list) { 3000 if (ns->head->ns_id == nsid) { 3001 if (!kref_get_unless_zero(&ns->kref)) 3002 continue; 3003 ret = ns; 3004 break; 3005 } 3006 if (ns->head->ns_id > nsid) 3007 break; 3008 } 3009 up_read(&ctrl->namespaces_rwsem); 3010 return ret; 3011 } 3012 3013 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns) 3014 { 3015 struct streams_directive_params s; 3016 int ret; 3017 3018 if (!ctrl->nr_streams) 3019 return 0; 3020 3021 ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); 3022 if (ret) 3023 return ret; 3024 3025 ns->sws = le32_to_cpu(s.sws); 3026 ns->sgs = le16_to_cpu(s.sgs); 3027 3028 if (ns->sws) { 3029 unsigned int bs = 1 << ns->lba_shift; 3030 3031 blk_queue_io_min(ns->queue, bs * ns->sws); 3032 if (ns->sgs) 3033 blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs); 3034 } 3035 3036 return 0; 3037 } 3038 3039 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3040 { 3041 struct nvme_ns *ns; 3042 struct gendisk *disk; 3043 struct nvme_id_ns *id; 3044 char disk_name[DISK_NAME_LEN]; 3045 int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; 3046 3047 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3048 if (!ns) 3049 return; 3050 3051 ns->queue = blk_mq_init_queue(ctrl->tagset); 3052 if (IS_ERR(ns->queue)) 3053 goto out_free_ns; 3054 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 3055 ns->queue->queuedata = ns; 3056 ns->ctrl = ctrl; 3057 3058 kref_init(&ns->kref); 3059 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ 3060 3061 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 3062 nvme_set_queue_limits(ctrl, ns->queue); 
3063 3064 id = nvme_identify_ns(ctrl, nsid); 3065 if (!id) 3066 goto out_free_queue; 3067 3068 if (id->ncap == 0) 3069 goto out_free_id; 3070 3071 if (nvme_init_ns_head(ns, nsid, id)) 3072 goto out_free_id; 3073 nvme_setup_streams_ns(ctrl, ns); 3074 nvme_set_disk_name(disk_name, ns, ctrl, &flags); 3075 3076 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { 3077 if (nvme_nvm_register(ns, disk_name, node)) { 3078 dev_warn(ctrl->device, "LightNVM init failure\n"); 3079 goto out_unlink_ns; 3080 } 3081 } 3082 3083 disk = alloc_disk_node(0, node); 3084 if (!disk) 3085 goto out_unlink_ns; 3086 3087 disk->fops = &nvme_fops; 3088 disk->private_data = ns; 3089 disk->queue = ns->queue; 3090 disk->flags = flags; 3091 memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); 3092 ns->disk = disk; 3093 3094 __nvme_revalidate_disk(disk, id); 3095 3096 down_write(&ctrl->namespaces_rwsem); 3097 list_add_tail(&ns->list, &ctrl->namespaces); 3098 up_write(&ctrl->namespaces_rwsem); 3099 3100 nvme_get_ctrl(ctrl); 3101 3102 device_add_disk(ctrl->device, ns->disk); 3103 if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj, 3104 &nvme_ns_id_attr_group)) 3105 pr_warn("%s: failed to create sysfs group for identification\n", 3106 ns->disk->disk_name); 3107 if (ns->ndev && nvme_nvm_register_sysfs(ns)) 3108 pr_warn("%s: failed to register lightnvm sysfs group for identification\n", 3109 ns->disk->disk_name); 3110 3111 nvme_mpath_add_disk(ns, id); 3112 nvme_fault_inject_init(ns); 3113 kfree(id); 3114 3115 return; 3116 out_unlink_ns: 3117 mutex_lock(&ctrl->subsys->lock); 3118 list_del_rcu(&ns->siblings); 3119 mutex_unlock(&ctrl->subsys->lock); 3120 out_free_id: 3121 kfree(id); 3122 out_free_queue: 3123 blk_cleanup_queue(ns->queue); 3124 out_free_ns: 3125 kfree(ns); 3126 } 3127 3128 static void nvme_ns_remove(struct nvme_ns *ns) 3129 { 3130 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 3131 return; 3132 3133 nvme_fault_inject_fini(ns); 3134 if (ns->disk && ns->disk->flags & GENHD_FL_UP) { 3135 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, 3136 &nvme_ns_id_attr_group); 3137 if (ns->ndev) 3138 nvme_nvm_unregister_sysfs(ns); 3139 del_gendisk(ns->disk); 3140 blk_cleanup_queue(ns->queue); 3141 if (blk_get_integrity(ns->disk)) 3142 blk_integrity_unregister(ns->disk); 3143 } 3144 3145 mutex_lock(&ns->ctrl->subsys->lock); 3146 nvme_mpath_clear_current_path(ns); 3147 list_del_rcu(&ns->siblings); 3148 mutex_unlock(&ns->ctrl->subsys->lock); 3149 3150 down_write(&ns->ctrl->namespaces_rwsem); 3151 list_del_init(&ns->list); 3152 up_write(&ns->ctrl->namespaces_rwsem); 3153 3154 synchronize_srcu(&ns->head->srcu); 3155 nvme_mpath_check_last_path(ns); 3156 nvme_put_ns(ns); 3157 } 3158 3159 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3160 { 3161 struct nvme_ns *ns; 3162 3163 ns = nvme_find_get_ns(ctrl, nsid); 3164 if (ns) { 3165 if (ns->disk && revalidate_disk(ns->disk)) 3166 nvme_ns_remove(ns); 3167 nvme_put_ns(ns); 3168 } else 3169 nvme_alloc_ns(ctrl, nsid); 3170 } 3171 3172 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3173 unsigned nsid) 3174 { 3175 struct nvme_ns *ns, *next; 3176 LIST_HEAD(rm_list); 3177 3178 down_write(&ctrl->namespaces_rwsem); 3179 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3180 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) 3181 list_move_tail(&ns->list, &rm_list); 3182 } 3183 up_write(&ctrl->namespaces_rwsem); 3184 3185 list_for_each_entry_safe(ns, next, &rm_list, list) 3186 nvme_ns_remove(ns); 3187 3188 } 3189 3190 
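/*
 * Usage sketch for nvme_get_log() above (illustration only, never called,
 * hence __maybe_unused): fetching the SMART / Health log. NVME_LOG_SMART
 * and struct nvme_smart_log come from <linux/nvme.h>; a zero lsp and a
 * zero offset read the whole page from the start.
 */
static int __maybe_unused nvme_example_read_smart_log(struct nvme_ctrl *ctrl,
		struct nvme_smart_log *smart)
{
	return nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
			smart, sizeof(*smart), 0);
}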
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) 3191 { 3192 struct nvme_ns *ns; 3193 __le32 *ns_list; 3194 unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); 3195 int ret = 0; 3196 3197 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 3198 if (!ns_list) 3199 return -ENOMEM; 3200 3201 for (i = 0; i < num_lists; i++) { 3202 ret = nvme_identify_ns_list(ctrl, prev, ns_list); 3203 if (ret) 3204 goto free; 3205 3206 for (j = 0; j < min(nn, 1024U); j++) { 3207 nsid = le32_to_cpu(ns_list[j]); 3208 if (!nsid) 3209 goto out; 3210 3211 nvme_validate_ns(ctrl, nsid); 3212 3213 while (++prev < nsid) { 3214 ns = nvme_find_get_ns(ctrl, prev); 3215 if (ns) { 3216 nvme_ns_remove(ns); 3217 nvme_put_ns(ns); 3218 } 3219 } 3220 } 3221 nn -= j; 3222 } 3223 out: 3224 nvme_remove_invalid_namespaces(ctrl, prev); 3225 free: 3226 kfree(ns_list); 3227 return ret; 3228 } 3229 3230 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) 3231 { 3232 unsigned i; 3233 3234 for (i = 1; i <= nn; i++) 3235 nvme_validate_ns(ctrl, i); 3236 3237 nvme_remove_invalid_namespaces(ctrl, nn); 3238 } 3239 3240 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 3241 { 3242 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 3243 __le32 *log; 3244 int error; 3245 3246 log = kzalloc(log_size, GFP_KERNEL); 3247 if (!log) 3248 return; 3249 3250 /* 3251 * We need to read the log to clear the AEN, but we don't want to rely 3252 * on it for the changed namespace information as userspace could have 3253 * raced with us in reading the log page, which could cause us to miss 3254 * updates. 3255 */ 3256 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log, 3257 log_size, 0); 3258 if (error) 3259 dev_warn(ctrl->device, 3260 "reading changed ns log failed: %d\n", error); 3261 3262 kfree(log); 3263 } 3264 3265 static void nvme_scan_work(struct work_struct *work) 3266 { 3267 struct nvme_ctrl *ctrl = 3268 container_of(work, struct nvme_ctrl, scan_work); 3269 struct nvme_id_ctrl *id; 3270 unsigned nn; 3271 3272 if (ctrl->state != NVME_CTRL_LIVE) 3273 return; 3274 3275 WARN_ON_ONCE(!ctrl->tagset); 3276 3277 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 3278 dev_info(ctrl->device, "rescanning namespaces.\n"); 3279 nvme_clear_changed_ns_log(ctrl); 3280 } 3281 3282 if (nvme_identify_ctrl(ctrl, &id)) 3283 return; 3284 3285 nn = le32_to_cpu(id->nn); 3286 if (ctrl->vs >= NVME_VS(1, 1, 0) && 3287 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { 3288 if (!nvme_scan_ns_list(ctrl, nn)) 3289 goto out_free_id; 3290 } 3291 nvme_scan_ns_sequential(ctrl, nn); 3292 out_free_id: 3293 kfree(id); 3294 down_write(&ctrl->namespaces_rwsem); 3295 list_sort(NULL, &ctrl->namespaces, ns_cmp); 3296 up_write(&ctrl->namespaces_rwsem); 3297 } 3298 3299 /* 3300 * This function iterates the namespace list unlocked to allow recovery from 3301 * controller failure. It is up to the caller to ensure the namespace list is 3302 * not modified by scan work while this function is executing. 3303 */ 3304 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 3305 { 3306 struct nvme_ns *ns, *next; 3307 LIST_HEAD(ns_list); 3308 3309 /* 3310 * The dead state indicates the controller was not gracefully 3311 * disconnected. In that case, we won't be able to flush any data while 3312 * removing the namespaces' disks; fail all the queues now to avoid 3313 * potentially having to clean up the failed sync later.
3314 */ 3315 if (ctrl->state == NVME_CTRL_DEAD) 3316 nvme_kill_queues(ctrl); 3317 3318 down_write(&ctrl->namespaces_rwsem); 3319 list_splice_init(&ctrl->namespaces, &ns_list); 3320 up_write(&ctrl->namespaces_rwsem); 3321 3322 list_for_each_entry_safe(ns, next, &ns_list, list) 3323 nvme_ns_remove(ns); 3324 } 3325 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 3326 3327 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 3328 { 3329 char *envp[2] = { NULL, NULL }; 3330 u32 aen_result = ctrl->aen_result; 3331 3332 ctrl->aen_result = 0; 3333 if (!aen_result) 3334 return; 3335 3336 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 3337 if (!envp[0]) 3338 return; 3339 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 3340 kfree(envp[0]); 3341 } 3342 3343 static void nvme_async_event_work(struct work_struct *work) 3344 { 3345 struct nvme_ctrl *ctrl = 3346 container_of(work, struct nvme_ctrl, async_event_work); 3347 3348 nvme_aen_uevent(ctrl); 3349 ctrl->ops->submit_async_event(ctrl); 3350 } 3351 3352 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 3353 { 3354 3355 u32 csts; 3356 3357 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 3358 return false; 3359 3360 if (csts == ~0) 3361 return false; 3362 3363 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 3364 } 3365 3366 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 3367 { 3368 struct nvme_fw_slot_info_log *log; 3369 3370 log = kmalloc(sizeof(*log), GFP_KERNEL); 3371 if (!log) 3372 return; 3373 3374 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log, 3375 sizeof(*log), 0)) 3376 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 3377 kfree(log); 3378 } 3379 3380 static void nvme_fw_act_work(struct work_struct *work) 3381 { 3382 struct nvme_ctrl *ctrl = container_of(work, 3383 struct nvme_ctrl, fw_act_work); 3384 unsigned long fw_act_timeout; 3385 3386 if (ctrl->mtfa) 3387 fw_act_timeout = jiffies + 3388 msecs_to_jiffies(ctrl->mtfa * 100); 3389 else 3390 fw_act_timeout = jiffies + 3391 msecs_to_jiffies(admin_timeout * 1000); 3392 3393 nvme_stop_queues(ctrl); 3394 while (nvme_ctrl_pp_status(ctrl)) { 3395 if (time_after(jiffies, fw_act_timeout)) { 3396 dev_warn(ctrl->device, 3397 "FW activation timeout, resetting controller\n"); 3398 nvme_reset_ctrl(ctrl); 3399 break; 3400 } 3401 msleep(100); 3402 } 3403 3404 if (ctrl->state != NVME_CTRL_LIVE) 3405 return; 3406 3407 nvme_start_queues(ctrl); 3408 /* read FW slot information to clear the AER */ 3409 nvme_get_fw_slot_info(ctrl); 3410 } 3411 3412 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 3413 { 3414 switch ((result & 0xff00) >> 8) { 3415 case NVME_AER_NOTICE_NS_CHANGED: 3416 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 3417 nvme_queue_scan(ctrl); 3418 break; 3419 case NVME_AER_NOTICE_FW_ACT_STARTING: 3420 queue_work(nvme_wq, &ctrl->fw_act_work); 3421 break; 3422 #ifdef CONFIG_NVME_MULTIPATH 3423 case NVME_AER_NOTICE_ANA: 3424 if (!ctrl->ana_log_buf) 3425 break; 3426 queue_work(nvme_wq, &ctrl->ana_work); 3427 break; 3428 #endif 3429 default: 3430 dev_warn(ctrl->device, "async event result %08x\n", result); 3431 } 3432 } 3433 3434 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 3435 volatile union nvme_result *res) 3436 { 3437 u32 result = le32_to_cpu(res->u32); 3438 3439 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 3440 return; 3441 3442 switch (result & 0x7) { 3443 case NVME_AER_NOTICE: 3444 nvme_handle_aen_notice(ctrl, result); 3445 break; 3446 case NVME_AER_ERROR:
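/*
 * ERROR/SMART/CSS/VS events need no additional handling here: stash the
 * raw result so nvme_aen_uevent() can forward it to userspace as an
 * NVME_AEN=... uevent from the work item queued below.
 */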
3447 case NVME_AER_SMART: 3448 case NVME_AER_CSS: 3449 case NVME_AER_VS: 3450 ctrl->aen_result = result; 3451 break; 3452 default: 3453 break; 3454 } 3455 queue_work(nvme_wq, &ctrl->async_event_work); 3456 } 3457 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 3458 3459 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 3460 { 3461 nvme_mpath_stop(ctrl); 3462 nvme_stop_keep_alive(ctrl); 3463 flush_work(&ctrl->async_event_work); 3464 flush_work(&ctrl->scan_work); 3465 cancel_work_sync(&ctrl->fw_act_work); 3466 if (ctrl->ops->stop_ctrl) 3467 ctrl->ops->stop_ctrl(ctrl); 3468 } 3469 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 3470 3471 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 3472 { 3473 if (ctrl->kato) 3474 nvme_start_keep_alive(ctrl); 3475 3476 if (ctrl->queue_count > 1) { 3477 nvme_queue_scan(ctrl); 3478 nvme_enable_aen(ctrl); 3479 queue_work(nvme_wq, &ctrl->async_event_work); 3480 nvme_start_queues(ctrl); 3481 } 3482 } 3483 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 3484 3485 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 3486 { 3487 cdev_device_del(&ctrl->cdev, ctrl->device); 3488 } 3489 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 3490 3491 static void nvme_free_ctrl(struct device *dev) 3492 { 3493 struct nvme_ctrl *ctrl = 3494 container_of(dev, struct nvme_ctrl, ctrl_device); 3495 struct nvme_subsystem *subsys = ctrl->subsys; 3496 3497 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3498 kfree(ctrl->effects); 3499 nvme_mpath_uninit(ctrl); 3500 3501 if (subsys) { 3502 mutex_lock(&subsys->lock); 3503 list_del(&ctrl->subsys_entry); 3504 mutex_unlock(&subsys->lock); 3505 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 3506 } 3507 3508 ctrl->ops->free_ctrl(ctrl); 3509 3510 if (subsys) 3511 nvme_put_subsystem(subsys); 3512 } 3513 3514 /* 3515 * Initialize an NVMe controller structure. This needs to be called during 3516 * the earliest initialization so that we have the initialized structure 3517 * around during probing.
3518 */ 3519 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 3520 const struct nvme_ctrl_ops *ops, unsigned long quirks) 3521 { 3522 int ret; 3523 3524 ctrl->state = NVME_CTRL_NEW; 3525 spin_lock_init(&ctrl->lock); 3526 INIT_LIST_HEAD(&ctrl->namespaces); 3527 init_rwsem(&ctrl->namespaces_rwsem); 3528 ctrl->dev = dev; 3529 ctrl->ops = ops; 3530 ctrl->quirks = quirks; 3531 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 3532 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 3533 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 3534 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 3535 3536 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 3537 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 3538 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 3539 3540 ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); 3541 if (ret < 0) 3542 goto out; 3543 ctrl->instance = ret; 3544 3545 device_initialize(&ctrl->ctrl_device); 3546 ctrl->device = &ctrl->ctrl_device; 3547 ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance); 3548 ctrl->device->class = nvme_class; 3549 ctrl->device->parent = ctrl->dev; 3550 ctrl->device->groups = nvme_dev_attr_groups; 3551 ctrl->device->release = nvme_free_ctrl; 3552 dev_set_drvdata(ctrl->device, ctrl); 3553 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 3554 if (ret) 3555 goto out_release_instance; 3556 3557 cdev_init(&ctrl->cdev, &nvme_dev_fops); 3558 ctrl->cdev.owner = ops->module; 3559 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 3560 if (ret) 3561 goto out_free_name; 3562 3563 /* 3564 * Initialize latency tolerance controls. The sysfs files won't 3565 * be visible to userspace unless the device actually supports APST. 3566 */ 3567 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 3568 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 3569 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 3570 3571 return 0; 3572 out_free_name: 3573 kfree_const(ctrl->device->kobj.name); /* free the name set via dev_set_name() above, not the parent's */ 3574 out_release_instance: 3575 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3576 out: 3577 return ret; 3578 } 3579 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 3580 3581 /** 3582 * nvme_kill_queues(): Ends all namespace queues 3583 * @ctrl: the dead controller that needs to end 3584 * 3585 * Call this function when the driver determines it is unable to get the 3586 * controller in a state capable of servicing IO.
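*
* nvme_remove_namespaces() does exactly that for NVME_CTRL_DEAD
* controllers, so that outstanding and future I/O fails fast instead of
* blocking on quiesced queues (see nvme_set_queue_dying()).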
3587 */ 3588 void nvme_kill_queues(struct nvme_ctrl *ctrl) 3589 { 3590 struct nvme_ns *ns; 3591 3592 down_read(&ctrl->namespaces_rwsem); 3593 3594 /* Forcibly unquiesce queues to avoid blocking dispatch */ 3595 if (ctrl->admin_q) 3596 blk_mq_unquiesce_queue(ctrl->admin_q); 3597 3598 list_for_each_entry(ns, &ctrl->namespaces, list) 3599 nvme_set_queue_dying(ns); 3600 3601 up_read(&ctrl->namespaces_rwsem); 3602 } 3603 EXPORT_SYMBOL_GPL(nvme_kill_queues); 3604 3605 void nvme_unfreeze(struct nvme_ctrl *ctrl) 3606 { 3607 struct nvme_ns *ns; 3608 3609 down_read(&ctrl->namespaces_rwsem); 3610 list_for_each_entry(ns, &ctrl->namespaces, list) 3611 blk_mq_unfreeze_queue(ns->queue); 3612 up_read(&ctrl->namespaces_rwsem); 3613 } 3614 EXPORT_SYMBOL_GPL(nvme_unfreeze); 3615 3616 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 3617 { 3618 struct nvme_ns *ns; 3619 3620 down_read(&ctrl->namespaces_rwsem); 3621 list_for_each_entry(ns, &ctrl->namespaces, list) { 3622 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 3623 if (timeout <= 0) 3624 break; 3625 } 3626 up_read(&ctrl->namespaces_rwsem); 3627 } 3628 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 3629 3630 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 3631 { 3632 struct nvme_ns *ns; 3633 3634 down_read(&ctrl->namespaces_rwsem); 3635 list_for_each_entry(ns, &ctrl->namespaces, list) 3636 blk_mq_freeze_queue_wait(ns->queue); 3637 up_read(&ctrl->namespaces_rwsem); 3638 } 3639 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 3640 3641 void nvme_start_freeze(struct nvme_ctrl *ctrl) 3642 { 3643 struct nvme_ns *ns; 3644 3645 down_read(&ctrl->namespaces_rwsem); 3646 list_for_each_entry(ns, &ctrl->namespaces, list) 3647 blk_freeze_queue_start(ns->queue); 3648 up_read(&ctrl->namespaces_rwsem); 3649 } 3650 EXPORT_SYMBOL_GPL(nvme_start_freeze); 3651 3652 void nvme_stop_queues(struct nvme_ctrl *ctrl) 3653 { 3654 struct nvme_ns *ns; 3655 3656 down_read(&ctrl->namespaces_rwsem); 3657 list_for_each_entry(ns, &ctrl->namespaces, list) 3658 blk_mq_quiesce_queue(ns->queue); 3659 up_read(&ctrl->namespaces_rwsem); 3660 } 3661 EXPORT_SYMBOL_GPL(nvme_stop_queues); 3662 3663 void nvme_start_queues(struct nvme_ctrl *ctrl) 3664 { 3665 struct nvme_ns *ns; 3666 3667 down_read(&ctrl->namespaces_rwsem); 3668 list_for_each_entry(ns, &ctrl->namespaces, list) 3669 blk_mq_unquiesce_queue(ns->queue); 3670 up_read(&ctrl->namespaces_rwsem); 3671 } 3672 EXPORT_SYMBOL_GPL(nvme_start_queues); 3673 3674 int __init nvme_core_init(void) 3675 { 3676 int result = -ENOMEM; 3677 3678 nvme_wq = alloc_workqueue("nvme-wq", 3679 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 3680 if (!nvme_wq) 3681 goto out; 3682 3683 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", 3684 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 3685 if (!nvme_reset_wq) 3686 goto destroy_wq; 3687 3688 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", 3689 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 3690 if (!nvme_delete_wq) 3691 goto destroy_reset_wq; 3692 3693 result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme"); 3694 if (result < 0) 3695 goto destroy_delete_wq; 3696 3697 nvme_class = class_create(THIS_MODULE, "nvme"); 3698 if (IS_ERR(nvme_class)) { 3699 result = PTR_ERR(nvme_class); 3700 goto unregister_chrdev; 3701 } 3702 3703 nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem"); 3704 if (IS_ERR(nvme_subsys_class)) { 3705 result = PTR_ERR(nvme_subsys_class); 3706 goto destroy_class; 3707 } 3708 return 0; 3709 3710 destroy_class: 3711 class_destroy(nvme_class); 3712 unregister_chrdev: 
3713 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 3714 destroy_delete_wq: 3715 destroy_workqueue(nvme_delete_wq); 3716 destroy_reset_wq: 3717 destroy_workqueue(nvme_reset_wq); 3718 destroy_wq: 3719 destroy_workqueue(nvme_wq); 3720 out: 3721 return result; 3722 } 3723 3724 void nvme_core_exit(void) 3725 { 3726 ida_destroy(&nvme_subsystems_ida); 3727 class_destroy(nvme_subsys_class); 3728 class_destroy(nvme_class); 3729 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 3730 destroy_workqueue(nvme_delete_wq); 3731 destroy_workqueue(nvme_reset_wq); 3732 destroy_workqueue(nvme_wq); 3733 } 3734 3735 MODULE_LICENSE("GPL"); 3736 MODULE_VERSION("1.0"); 3737 module_init(nvme_core_init); 3738 module_exit(nvme_core_exit); 3739
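/*
 * Transport-side usage sketch (hypothetical "foo" transport and helpers,
 * illustration only, hence #if 0): the expected pairing of the entry
 * points exported above is nvme_init_ctrl() during probe and
 * nvme_start_ctrl() once queues exist, mirrored by nvme_stop_ctrl(),
 * nvme_uninit_ctrl() and nvme_put_ctrl() on teardown.
 */
#if 0
static int foo_probe(struct device *dev)
{
	struct foo_ctrl *foo = foo_alloc(dev);	/* hypothetical wrapper */
	int ret;

	ret = nvme_init_ctrl(&foo->ctrl, dev, &foo_ctrl_ops, 0 /* quirks */);
	if (ret)
		return ret;
	if (foo_setup_admin_queue(foo)) {	/* hypothetical setup step */
		nvme_uninit_ctrl(&foo->ctrl);
		nvme_put_ctrl(&foo->ctrl);
		return -EIO;
	}
	nvme_start_ctrl(&foo->ctrl);
	return 0;
}
#endif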