// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
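
/*
 * All of the parameters above use 0644 permissions: besides being given at
 * module load time (e.g. "modprobe nvme-core io_timeout=60", or
 * "nvme_core.io_timeout=60" on the kernel command line when built in), they
 * typically also show up for runtime changes under
 * /sys/module/nvme_core/parameters/.
 */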

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	/*
	 * Revalidating a dead namespace sets capacity to 0. This will prevent
	 * buffered writers from dirtying pages that can't be synced.
	 */
	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;
	revalidate_disk(ns->disk);
	blk_set_queue_dying(ns->queue);
	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ns->queue);
}

static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		queue_work(nvme_wq, &ctrl->scan_work);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE &&
		    ctrl->state != NVME_CTRL_ADMIN_ONLY)
			ret = -ENETRESET;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret = 0;

	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		ret = -EBUSY;
	if (!ret)
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
	return ret;
}

static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

static blk_status_t nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	default:
		return BLK_STS_IOERR;
	}
}

static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

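/*
 * If the controller reported a Command Retry Delay (CRD, bits 12:11 of the
 * status field), delay the requeue by the corresponding CRDT value; the
 * CRDT fields are specified in units of 100 milliseconds, hence the
 * multiplication below.
 */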
static void nvme_retry_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (ns && crd)
		delay = ns->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

void nvme_complete_rq(struct request *req)
{
	blk_status_t status = nvme_error_status(req);

	trace_nvme_complete_rq(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
		if ((req->cmd_flags & REQ_NVME_MPATH) &&
		    blk_path_error(status)) {
			nvme_failover_req(req);
			return;
		}

		if (!blk_queue_dying(req->q)) {
			nvme_retry_req(req);
			return;
		}
	}
	blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	nvme_req(req)->status = NVME_SC_ABORT_REQ;
	blk_mq_complete_request_sync(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

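/*
 * Controller state machine: the switch below only admits the transitions
 * NEW/RESETTING/CONNECTING -> LIVE, CONNECTING -> ADMIN_ONLY,
 * NEW/LIVE/ADMIN_ONLY -> RESETTING, NEW/RESETTING -> CONNECTING,
 * LIVE/ADMIN_ONLY/RESETTING/CONNECTING -> DELETING and DELETING -> DEAD;
 * any other requested transition leaves the state unchanged and returns
 * false.
 */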
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_ADMIN_ONLY:
		switch (old_state) {
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_ADMIN_ONLY:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_ADMIN_ONLY:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	list_del_init(&head->entry);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

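/*
 * qid == NVME_QID_ANY lets blk-mq pick any hardware context; a specific
 * qid pins the request to that I/O queue's hctx (qid is 1-based here,
 * blk-mq hctx indexes are 0-based, hence the "qid - 1" below). Passthrough
 * requests are marked REQ_FAILFAST_DRIVER, which makes
 * nvme_req_needs_retry() decline to retry them.
 */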
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_clear_nvme_request(req);
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		return ret;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		nvme_disable_streams(ctrl);
		return 0;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
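/*
 * The mapping used below is simply: WRITE_LIFE_NONE / unset -> no stream,
 * WRITE_LIFE_SHORT -> stream 1, WRITE_LIFE_MEDIUM -> stream 2, and so on;
 * hints beyond the number of streams the controller granted us
 * (ctrl->nr_streams) are dropped with a one-time warning.
 */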
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

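/*
 * Each bio in the request is translated into one struct nvme_dsm_range
 * (slba/nlb pair) below; the ranges are passed to the device as the data
 * payload of a single Dataset Management command with the "deallocate"
 * attribute set, up to NVME_DSM_MAX_RANGES ranges per command.
 */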
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range),
				GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail to allocate our range array, fall back to the
		 * controller discard page. If that's also busy, it's safe to
		 * return busy, as we know we can make progress once that's
		 * freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides
		 * a metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.
		 * Else we enable the PRACT bit for protection information or
		 * set the namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		} else if (req_op(req) == REQ_OP_WRITE) {
			t10_pi_prepare(req, ns->pi_type);
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    nvme_req(req)->status == 0) {
		struct nvme_ns *ns = req->rq_disk->private_data;

		t10_pi_complete(req, ns->pi_type,
				blk_rq_bytes(req) >> ns->lba_shift);
	}
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ns *ns = req->rq_disk->private_data;
		struct page *page = req->special_vec.bv_page;

		if (page == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(page_address(page) + req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	nvme_clear_nvme_request(req);

	memset(cmd, 0, sizeof(*cmd));
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	complete(waiting);
}

static void nvme_execute_rq_polled(struct request_queue *q,
		struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));

	rq->cmd_flags |= REQ_HIPRI;
	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);

	while (!completion_done(&wait)) {
		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	}
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (poll)
		nvme_execute_rq_polled(req->q, NULL, req, at_head);
	else
		blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		bio->bi_disk = disk;
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
 out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
 out:
	blk_mq_free_request(req);
	return ret;
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

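/*
 * Traffic Based Keep Alive (the TBKAS bit in CTRATT): when the controller
 * advertises it, any command completion observed during the keep-alive
 * interval (tracked via ctrl->comp_seen) counts as proof of liveness, so
 * the work below only reschedules itself instead of sending an explicit
 * Keep Alive command.
 */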
static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

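/*
 * The Namespace Identification Descriptor list (CNS 03h) is a sequence of
 * {type, length, data} entries inside a 4k buffer; the loop below walks it
 * until it hits a zero-length descriptor and copies out the EUI-64, NGUID
 * and UUID identifiers it recognises.
 */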
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ids->eui64, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ids->nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
				    NVME_IDENTIFY_DATA_SIZE);
}

static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid)
{
	struct nvme_id_ns *id;
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NULL;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		kfree(id);
		return NULL;
	}

	return id;
}

static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

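/*
 * The Number of Queues feature is 0's based on the wire: dword11 carries
 * (requested submission queues - 1) in the low 16 bits and (requested
 * completion queues - 1) in the high 16 bits, and the returned result is
 * decoded the same way below.
 */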
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);
}

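/*
 * Handler for the legacy NVME_IOCTL_SUBMIT_IO interface: the user supplies
 * a struct nvme_user_io with a 0's based block count, so the data length is
 * (nblocks + 1) << lba_shift. For extended-LBA formats the per-block
 * metadata travels inline with the data; otherwise a separate, dword
 * aligned metadata buffer must be provided.
 */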
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
								u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ctrl->effects)
			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	else
		effects = nvme_known_admin_effects(opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		mutex_lock(&ctrl->scan_lock);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (ns->disk && nvme_revalidate_disk(ns->disk))
			nvme_set_queue_dying(ns);
	up_read(&ctrl->namespaces_rwsem);

	nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	/*
	 * Revalidate LBA changes prior to unfreezing. This is necessary to
	 * prevent memory corruption if a logical block size was changed by
	 * this command.
	 */
	if (effects & NVME_CMD_EFFECTS_LBCC)
		nvme_update_formats(ctrl);
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		nvme_unfreeze(ctrl);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
		nvme_queue_scan(ctrl);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
			0, &cmd.result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

/*
 * Issue ioctl requests on the first available path.  Note that unlike normal
 * block layer requests we will not retry failed requests on another
 * controller.
 */
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
	if (disk->fops == &nvme_ns_head_ops) {
		*head = disk->private_data;
		*srcu_idx = srcu_read_lock(&(*head)->srcu);
		return nvme_find_path(*head);
	}
#endif
	*head = NULL;
	*srcu_idx = -1;
	return disk->private_data;
}

static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
	if (head)
		srcu_read_unlock(&head->srcu, idx);
}

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
#ifdef CONFIG_NVM
		if (ns->ndev)
			return nvme_nvm_ioctl(ns, cmd, arg);
#endif
		if (is_sed_ioctl(cmd))
			return sed_ioctl(ns->ctrl->opal_dev, cmd,
					 (void __user *) arg);
		return -ENOTTY;
	}
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		ret = -EWOULDBLOCK;
	else
		ret = nvme_ns_ioctl(ns, cmd, arg);
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

#ifdef CONFIG_NVME_MULTIPATH
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(ns->head->disk))
		goto fail;
#endif
	if (!kref_get_unless_zero(&ns->kref))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, 1);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_set_chunk_size(struct nvme_ns *ns)
{
	u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}

static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, UINT_MAX);
	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
{
	u32 max_sectors;
	unsigned short bs = 1 << ns->lba_shift;

	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		return;
	/*
	 * Even though the NVMe spec explicitly states that MDTS is not
	 * applicable to Write Zeroes ("The restriction does not apply to
	 * commands that do not transfer data between the host and the
	 * controller (e.g., Write Uncorrectable or Write Zeroes command)."),
	 * be more cautious and use the controller's max_hw_sectors value,
	 * which is configured from the controller's MDTS field in
	 * nvme_init_identify() if available, to cap the maximum sectors for
	 * write-zeroes.
	 */
	if (ns->ctrl->max_hw_sectors == UINT_MAX)
		max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
	else
		max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;

	blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
}

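/*
 * Which identifiers exist depends on the spec revision the controller
 * implements: EUI-64 was added in NVMe 1.1, NGUID in 1.2, and the Identify
 * Namespace Identification Descriptor list (including the UUID) in 1.3,
 * which is why each copy below is gated on the controller's reported
 * version number.
 */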
static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
	memset(ids, 0, sizeof(*ids));

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
	if (ctrl->vs >= NVME_VS(1, 3, 0)) {
		/* Don't treat an error as fatal, as we potentially already
		 * have an NGUID or EUI-64.
		 */
		if (nvme_identify_ns_descs(ctrl, nsid, ids))
			dev_warn(ctrl->device,
				 "%s: Identify Descriptors failed\n", __func__);
	}
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
}

static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
	unsigned short bs = 1 << ns->lba_shift;

	if (ns->lba_shift > PAGE_SHIFT) {
		/* unsupported block size, set capacity to 0 later */
		bs = (1 << 9);
	}
	blk_mq_freeze_queue(disk->queue);
	blk_integrity_unregister(disk);

	blk_queue_logical_block_size(disk->queue, bs);
	blk_queue_physical_block_size(disk->queue, bs);
	blk_queue_io_min(disk->queue, bs);

	if (ns->ms && !ns->ext &&
	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		nvme_init_integrity(disk, ns->ms, ns->pi_type);
	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
	    ns->lba_shift > PAGE_SHIFT)
		capacity = 0;

	set_capacity(disk, capacity);

	nvme_config_discard(disk, ns);
	nvme_config_write_zeroes(disk, ns);

	if (id->nsattr & (1 << 0))
		set_disk_ro(disk, true);
	else
		set_disk_ro(disk, false);

	blk_mq_unfreeze_queue(disk->queue);
}

static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;

	/*
	 * If identify namespace failed, use a default 512 byte block size so
	 * that the block layer can use the disk before failing reads/writes
	 * for 0 capacity.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	ns->noiob = le16_to_cpu(id->noiob);
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
	/* the PI implementation requires metadata equal to the t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	if (ns->noiob)
		nvme_set_chunk_size(ns);
	nvme_update_disk_info(disk, ns, id);
#ifdef CONFIG_NVME_MULTIPATH
	if (ns->head->disk) {
		nvme_update_disk_info(ns->head->disk, ns, id);
		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
	}
#endif
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_id_ns *id;
	struct nvme_ns_ids ids;
	int ret = 0;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	id = nvme_identify_ns(ctrl, ns->head->ns_id);
	if (!id)
		return -ENODEV;

	if (id->ncap == 0) {
		ret = -ENODEV;
		goto out;
	}

	__nvme_revalidate_disk(disk, id);
	nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
		dev_err(ctrl->device,
			"identifiers changed for nsid %d\n", ns->head->ns_id);
		ret = -ENODEV;
	}

out:
	kfree(id);
	return ret;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
};

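/*
 * For the reservation commands built below, cdw10 carries the action in
 * bits 2:0, the "ignore existing key" flag in bit 3, the reservation type
 * (as returned by nvme_pr_type() above) in bits 15:8 and, for Register,
 * the persist-through-power-loss setting in bits 31:30; the registration
 * keys travel in a 16-byte data payload.
 */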
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
	struct nvme_command c;
	int srcu_idx, ret;
	u8 data[16] = { 0, };

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw10 = cpu_to_le32(cdw10);

	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;

	if (!kref_get_unless_zero(&head->ref))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.getgeo		= nvme_getgeo,
	.pr_ops		= &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */

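/*
 * CAP.TO is reported in units of 500 ms, hence the "(TO + 1) * HZ / 2"
 * timeout below. A CSTS value of all-ones is taken to mean the device (or
 * the transport underneath it) has gone away entirely.
 */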
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

blk_queue_write_cache(q, vwc, vwc); 1984 } 1985 1986 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 1987 { 1988 __le64 ts; 1989 int ret; 1990 1991 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 1992 return 0; 1993 1994 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 1995 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 1996 NULL); 1997 if (ret) 1998 dev_warn_once(ctrl->device, 1999 "could not set timestamp (%d)\n", ret); 2000 return ret; 2001 } 2002 2003 static int nvme_configure_acre(struct nvme_ctrl *ctrl) 2004 { 2005 struct nvme_feat_host_behavior *host; 2006 int ret; 2007 2008 /* Don't bother enabling the feature if retry delay is not reported */ 2009 if (!ctrl->crdt[0]) 2010 return 0; 2011 2012 host = kzalloc(sizeof(*host), GFP_KERNEL); 2013 if (!host) 2014 return 0; 2015 2016 host->acre = NVME_ENABLE_ACRE; 2017 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2018 host, sizeof(*host), NULL); 2019 kfree(host); 2020 return ret; 2021 } 2022 2023 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2024 { 2025 /* 2026 * APST (Autonomous Power State Transition) lets us program a 2027 * table of power state transitions that the controller will 2028 * perform automatically. We configure it with a simple 2029 * heuristic: we are willing to spend at most 2% of the time 2030 * transitioning between power states. Therefore, when running 2031 * in any given state, we will enter the next lower-power 2032 * non-operational state after waiting 50 * (enlat + exlat) 2033 * microseconds, as long as that state's exit latency is under 2034 * the requested maximum latency. 2035 * 2036 * We will not autonomously enter any non-operational state for 2037 * which the total latency exceeds ps_max_latency_us. Users 2038 * can set ps_max_latency_us to zero to turn off APST. 2039 */ 2040 2041 unsigned apste; 2042 struct nvme_feat_auto_pst *table; 2043 u64 max_lat_us = 0; 2044 int max_ps = -1; 2045 int ret; 2046 2047 /* 2048 * If APST isn't supported or if we haven't been initialized yet, 2049 * then don't do anything. 2050 */ 2051 if (!ctrl->apsta) 2052 return 0; 2053 2054 if (ctrl->npss > 31) { 2055 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2056 return 0; 2057 } 2058 2059 table = kzalloc(sizeof(*table), GFP_KERNEL); 2060 if (!table) 2061 return 0; 2062 2063 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2064 /* Turn off APST. */ 2065 apste = 0; 2066 dev_dbg(ctrl->device, "APST disabled\n"); 2067 } else { 2068 __le64 target = cpu_to_le64(0); 2069 int state; 2070 2071 /* 2072 * Walk through all states from lowest- to highest-power. 2073 * According to the spec, lower-numbered states use more 2074 * power. NPSS, despite the name, is the index of the 2075 * lowest-power state, not the number of states. 2076 */ 2077 for (state = (int)ctrl->npss; state >= 0; state--) { 2078 u64 total_latency_us, exit_latency_us, transition_ms; 2079 2080 if (target) 2081 table->entries[state] = target; 2082 2083 /* 2084 * Don't allow transitions to the deepest state 2085 * if it's quirked off. 2086 */ 2087 if (state == ctrl->npss && 2088 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2089 continue; 2090 2091 /* 2092 * Is this state a useful non-operational state for 2093 * higher-power states to autonomously transition to? 
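	 * (If it is, it becomes the 'target' programmed into the entries of the
	 * higher-power states below: each APST table entry packs the idle
	 * transition power state in bits 7:3 and the idle time prior to
	 * transition, in milliseconds, in bits 31:8.)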
2094 */ 2095 if (!(ctrl->psd[state].flags & 2096 NVME_PS_FLAGS_NON_OP_STATE)) 2097 continue; 2098 2099 exit_latency_us = 2100 (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2101 if (exit_latency_us > ctrl->ps_max_latency_us) 2102 continue; 2103 2104 total_latency_us = 2105 exit_latency_us + 2106 le32_to_cpu(ctrl->psd[state].entry_lat); 2107 2108 /* 2109 * This state is good. Use it as the APST idle 2110 * target for higher power states. 2111 */ 2112 transition_ms = total_latency_us + 19; 2113 do_div(transition_ms, 20); 2114 if (transition_ms > (1 << 24) - 1) 2115 transition_ms = (1 << 24) - 1; 2116 2117 target = cpu_to_le64((state << 3) | 2118 (transition_ms << 8)); 2119 2120 if (max_ps == -1) 2121 max_ps = state; 2122 2123 if (total_latency_us > max_lat_us) 2124 max_lat_us = total_latency_us; 2125 } 2126 2127 apste = 1; 2128 2129 if (max_ps == -1) { 2130 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2131 } else { 2132 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2133 max_ps, max_lat_us, (int)sizeof(*table), table); 2134 } 2135 } 2136 2137 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2138 table, sizeof(*table), NULL); 2139 if (ret) 2140 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2141 2142 kfree(table); 2143 return ret; 2144 } 2145 2146 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2147 { 2148 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2149 u64 latency; 2150 2151 switch (val) { 2152 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2153 case PM_QOS_LATENCY_ANY: 2154 latency = U64_MAX; 2155 break; 2156 2157 default: 2158 latency = val; 2159 } 2160 2161 if (ctrl->ps_max_latency_us != latency) { 2162 ctrl->ps_max_latency_us = latency; 2163 nvme_configure_apst(ctrl); 2164 } 2165 } 2166 2167 struct nvme_core_quirk_entry { 2168 /* 2169 * NVMe model and firmware strings are padded with spaces. For 2170 * simplicity, strings in the quirk table are padded with NULLs 2171 * instead. 2172 */ 2173 u16 vid; 2174 const char *mn; 2175 const char *fr; 2176 unsigned long quirks; 2177 }; 2178 2179 static const struct nvme_core_quirk_entry core_quirks[] = { 2180 { 2181 /* 2182 * This Toshiba device seems to die using any APST states. See: 2183 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2184 */ 2185 .vid = 0x1179, 2186 .mn = "THNSF5256GPUK TOSHIBA", 2187 .quirks = NVME_QUIRK_NO_APST, 2188 } 2189 }; 2190 2191 /* match is null-terminated but idstr is space-padded. 
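 * A quirk-table entry therefore matches only when idstr begins with the entry
 * and every remaining byte is a space, e.g. the 40-byte model number field
 * "THNSF5256GPUK TOSHIBA" followed by trailing spaces.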
*/ 2192 static bool string_matches(const char *idstr, const char *match, size_t len) 2193 { 2194 size_t matchlen; 2195 2196 if (!match) 2197 return true; 2198 2199 matchlen = strlen(match); 2200 WARN_ON_ONCE(matchlen > len); 2201 2202 if (memcmp(idstr, match, matchlen)) 2203 return false; 2204 2205 for (; matchlen < len; matchlen++) 2206 if (idstr[matchlen] != ' ') 2207 return false; 2208 2209 return true; 2210 } 2211 2212 static bool quirk_matches(const struct nvme_id_ctrl *id, 2213 const struct nvme_core_quirk_entry *q) 2214 { 2215 return q->vid == le16_to_cpu(id->vid) && 2216 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2217 string_matches(id->fr, q->fr, sizeof(id->fr)); 2218 } 2219 2220 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2221 struct nvme_id_ctrl *id) 2222 { 2223 size_t nqnlen; 2224 int off; 2225 2226 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2227 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2228 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2229 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2230 return; 2231 } 2232 2233 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2234 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2235 } 2236 2237 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2238 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2239 "nqn.2014.08.org.nvmexpress:%04x%04x", 2240 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2241 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2242 off += sizeof(id->sn); 2243 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2244 off += sizeof(id->mn); 2245 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2246 } 2247 2248 static void __nvme_release_subsystem(struct nvme_subsystem *subsys) 2249 { 2250 ida_simple_remove(&nvme_subsystems_ida, subsys->instance); 2251 kfree(subsys); 2252 } 2253 2254 static void nvme_release_subsystem(struct device *dev) 2255 { 2256 __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev)); 2257 } 2258 2259 static void nvme_destroy_subsystem(struct kref *ref) 2260 { 2261 struct nvme_subsystem *subsys = 2262 container_of(ref, struct nvme_subsystem, ref); 2263 2264 mutex_lock(&nvme_subsystems_lock); 2265 list_del(&subsys->entry); 2266 mutex_unlock(&nvme_subsystems_lock); 2267 2268 ida_destroy(&subsys->ns_ida); 2269 device_del(&subsys->dev); 2270 put_device(&subsys->dev); 2271 } 2272 2273 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2274 { 2275 kref_put(&subsys->ref, nvme_destroy_subsystem); 2276 } 2277 2278 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2279 { 2280 struct nvme_subsystem *subsys; 2281 2282 lockdep_assert_held(&nvme_subsystems_lock); 2283 2284 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2285 if (strcmp(subsys->subnqn, subsysnqn)) 2286 continue; 2287 if (!kref_get_unless_zero(&subsys->ref)) 2288 continue; 2289 return subsys; 2290 } 2291 2292 return NULL; 2293 } 2294 2295 #define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2296 struct device_attribute subsys_attr_##_name = \ 2297 __ATTR(_name, _mode, _show, NULL) 2298 2299 static ssize_t nvme_subsys_show_nqn(struct device *dev, 2300 struct device_attribute *attr, 2301 char *buf) 2302 { 2303 struct nvme_subsystem *subsys = 2304 container_of(dev, struct nvme_subsystem, dev); 2305 2306 return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); 2307 } 2308 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2309 2310 #define nvme_subsys_show_str_function(field) \ 2311 static 
ssize_t subsys_##field##_show(struct device *dev, \ 2312 struct device_attribute *attr, char *buf) \ 2313 { \ 2314 struct nvme_subsystem *subsys = \ 2315 container_of(dev, struct nvme_subsystem, dev); \ 2316 return sprintf(buf, "%.*s\n", \ 2317 (int)sizeof(subsys->field), subsys->field); \ 2318 } \ 2319 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2320 2321 nvme_subsys_show_str_function(model); 2322 nvme_subsys_show_str_function(serial); 2323 nvme_subsys_show_str_function(firmware_rev); 2324 2325 static struct attribute *nvme_subsys_attrs[] = { 2326 &subsys_attr_model.attr, 2327 &subsys_attr_serial.attr, 2328 &subsys_attr_firmware_rev.attr, 2329 &subsys_attr_subsysnqn.attr, 2330 #ifdef CONFIG_NVME_MULTIPATH 2331 &subsys_attr_iopolicy.attr, 2332 #endif 2333 NULL, 2334 }; 2335 2336 static struct attribute_group nvme_subsys_attrs_group = { 2337 .attrs = nvme_subsys_attrs, 2338 }; 2339 2340 static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2341 &nvme_subsys_attrs_group, 2342 NULL, 2343 }; 2344 2345 static int nvme_active_ctrls(struct nvme_subsystem *subsys) 2346 { 2347 int count = 0; 2348 struct nvme_ctrl *ctrl; 2349 2350 mutex_lock(&subsys->lock); 2351 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { 2352 if (ctrl->state != NVME_CTRL_DELETING && 2353 ctrl->state != NVME_CTRL_DEAD) 2354 count++; 2355 } 2356 mutex_unlock(&subsys->lock); 2357 2358 return count; 2359 } 2360 2361 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2362 { 2363 struct nvme_subsystem *subsys, *found; 2364 int ret; 2365 2366 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2367 if (!subsys) 2368 return -ENOMEM; 2369 ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL); 2370 if (ret < 0) { 2371 kfree(subsys); 2372 return ret; 2373 } 2374 subsys->instance = ret; 2375 mutex_init(&subsys->lock); 2376 kref_init(&subsys->ref); 2377 INIT_LIST_HEAD(&subsys->ctrls); 2378 INIT_LIST_HEAD(&subsys->nsheads); 2379 nvme_init_subnqn(subsys, ctrl, id); 2380 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2381 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2382 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); 2383 subsys->vendor_id = le16_to_cpu(id->vid); 2384 subsys->cmic = id->cmic; 2385 #ifdef CONFIG_NVME_MULTIPATH 2386 subsys->iopolicy = NVME_IOPOLICY_NUMA; 2387 #endif 2388 2389 subsys->dev.class = nvme_subsys_class; 2390 subsys->dev.release = nvme_release_subsystem; 2391 subsys->dev.groups = nvme_subsys_attrs_groups; 2392 dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance); 2393 device_initialize(&subsys->dev); 2394 2395 mutex_lock(&nvme_subsystems_lock); 2396 found = __nvme_find_get_subsystem(subsys->subnqn); 2397 if (found) { 2398 /* 2399 * Verify that the subsystem actually supports multiple 2400 * controllers, else bail out. 
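	 * CMIC bit 1 in Identify Controller advertises that the NVM subsystem
	 * may contain two or more controllers, which is what the
	 * (id->cmic & (1 << 1)) test below relies on.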
2401 */ 2402 if (!(ctrl->opts && ctrl->opts->discovery_nqn) && 2403 nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) { 2404 dev_err(ctrl->device, 2405 "ignoring ctrl due to duplicate subnqn (%s).\n", 2406 found->subnqn); 2407 nvme_put_subsystem(found); 2408 ret = -EINVAL; 2409 goto out_unlock; 2410 } 2411 2412 __nvme_release_subsystem(subsys); 2413 subsys = found; 2414 } else { 2415 ret = device_add(&subsys->dev); 2416 if (ret) { 2417 dev_err(ctrl->device, 2418 "failed to register subsystem device.\n"); 2419 goto out_unlock; 2420 } 2421 ida_init(&subsys->ns_ida); 2422 list_add_tail(&subsys->entry, &nvme_subsystems); 2423 } 2424 2425 ctrl->subsys = subsys; 2426 mutex_unlock(&nvme_subsystems_lock); 2427 2428 if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2429 dev_name(ctrl->device))) { 2430 dev_err(ctrl->device, 2431 "failed to create sysfs link from subsystem.\n"); 2432 /* the transport driver will eventually put the subsystem */ 2433 return -EINVAL; 2434 } 2435 2436 mutex_lock(&subsys->lock); 2437 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2438 mutex_unlock(&subsys->lock); 2439 2440 return 0; 2441 2442 out_unlock: 2443 mutex_unlock(&nvme_subsystems_lock); 2444 put_device(&subsys->dev); 2445 return ret; 2446 } 2447 2448 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, 2449 void *log, size_t size, u64 offset) 2450 { 2451 struct nvme_command c = { }; 2452 unsigned long dwlen = size / 4 - 1; 2453 2454 c.get_log_page.opcode = nvme_admin_get_log_page; 2455 c.get_log_page.nsid = cpu_to_le32(nsid); 2456 c.get_log_page.lid = log_page; 2457 c.get_log_page.lsp = lsp; 2458 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2459 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2460 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2461 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2462 2463 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2464 } 2465 2466 static int nvme_get_effects_log(struct nvme_ctrl *ctrl) 2467 { 2468 int ret; 2469 2470 if (!ctrl->effects) 2471 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); 2472 2473 if (!ctrl->effects) 2474 return 0; 2475 2476 ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, 2477 ctrl->effects, sizeof(*ctrl->effects), 0); 2478 if (ret) { 2479 kfree(ctrl->effects); 2480 ctrl->effects = NULL; 2481 } 2482 return ret; 2483 } 2484 2485 /* 2486 * Initialize the cached copies of the Identify data and various controller 2487 * registers in our nvme_ctrl structure. This should be called as soon as 2488 * the admin queue is fully up and running.
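 * As an example of the limits derived here: MDTS is a power of two in units
 * of the minimum memory page size (CAP.MPSMIN), so MDTS=5 with a 4KiB minimum
 * page size caps transfers at 128KiB, i.e. 256 512-byte sectors.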
2489 */ 2490 int nvme_init_identify(struct nvme_ctrl *ctrl) 2491 { 2492 struct nvme_id_ctrl *id; 2493 u64 cap; 2494 int ret, page_shift; 2495 u32 max_hw_sectors; 2496 bool prev_apst_enabled; 2497 2498 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 2499 if (ret) { 2500 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 2501 return ret; 2502 } 2503 2504 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap); 2505 if (ret) { 2506 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2507 return ret; 2508 } 2509 page_shift = NVME_CAP_MPSMIN(cap) + 12; 2510 2511 if (ctrl->vs >= NVME_VS(1, 1, 0)) 2512 ctrl->subsystem = NVME_CAP_NSSRC(cap); 2513 2514 ret = nvme_identify_ctrl(ctrl, &id); 2515 if (ret) { 2516 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 2517 return -EIO; 2518 } 2519 2520 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 2521 ret = nvme_get_effects_log(ctrl); 2522 if (ret < 0) 2523 goto out_free; 2524 } 2525 2526 if (!ctrl->identified) { 2527 int i; 2528 2529 ret = nvme_init_subsystem(ctrl, id); 2530 if (ret) 2531 goto out_free; 2532 2533 /* 2534 * Check for quirks. Quirk can depend on firmware version, 2535 * so, in principle, the set of quirks present can change 2536 * across a reset. As a possible future enhancement, we 2537 * could re-scan for quirks every time we reinitialize 2538 * the device, but we'd have to make sure that the driver 2539 * behaves intelligently if the quirks change. 2540 */ 2541 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 2542 if (quirk_matches(id, &core_quirks[i])) 2543 ctrl->quirks |= core_quirks[i].quirks; 2544 } 2545 } 2546 2547 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 2548 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 2549 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 2550 } 2551 2552 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 2553 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 2554 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 2555 2556 ctrl->oacs = le16_to_cpu(id->oacs); 2557 ctrl->oncs = le16_to_cpu(id->oncs); 2558 ctrl->oaes = le32_to_cpu(id->oaes); 2559 atomic_set(&ctrl->abort_limit, id->acl + 1); 2560 ctrl->vwc = id->vwc; 2561 if (id->mdts) 2562 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2563 else 2564 max_hw_sectors = UINT_MAX; 2565 ctrl->max_hw_sectors = 2566 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 2567 2568 nvme_set_queue_limits(ctrl, ctrl->admin_q); 2569 ctrl->sgls = le32_to_cpu(id->sgls); 2570 ctrl->kas = le16_to_cpu(id->kas); 2571 ctrl->max_namespaces = le32_to_cpu(id->mnan); 2572 ctrl->ctratt = le32_to_cpu(id->ctratt); 2573 2574 if (id->rtd3e) { 2575 /* us -> s */ 2576 u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000; 2577 2578 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 2579 shutdown_timeout, 60); 2580 2581 if (ctrl->shutdown_timeout != shutdown_timeout) 2582 dev_info(ctrl->device, 2583 "Shutdown timeout set to %u seconds\n", 2584 ctrl->shutdown_timeout); 2585 } else 2586 ctrl->shutdown_timeout = shutdown_timeout; 2587 2588 ctrl->npss = id->npss; 2589 ctrl->apsta = id->apsta; 2590 prev_apst_enabled = ctrl->apst_enabled; 2591 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 2592 if (force_apst && id->apsta) { 2593 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 2594 ctrl->apst_enabled = true; 2595 } else { 2596 ctrl->apst_enabled = false; 2597 } 2598 } else { 2599 ctrl->apst_enabled = id->apsta; 2600 } 2601 memcpy(ctrl->psd, id->psd, 
sizeof(ctrl->psd)); 2602 2603 if (ctrl->ops->flags & NVME_F_FABRICS) { 2604 ctrl->icdoff = le16_to_cpu(id->icdoff); 2605 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 2606 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 2607 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 2608 2609 /* 2610 * In fabrics we need to verify the cntlid matches the 2611 * admin connect 2612 */ 2613 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 2614 ret = -EINVAL; 2615 goto out_free; 2616 } 2617 2618 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { 2619 dev_err(ctrl->device, 2620 "keep-alive support is mandatory for fabrics\n"); 2621 ret = -EINVAL; 2622 goto out_free; 2623 } 2624 } else { 2625 ctrl->cntlid = le16_to_cpu(id->cntlid); 2626 ctrl->hmpre = le32_to_cpu(id->hmpre); 2627 ctrl->hmmin = le32_to_cpu(id->hmmin); 2628 ctrl->hmminds = le32_to_cpu(id->hmminds); 2629 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 2630 } 2631 2632 ret = nvme_mpath_init(ctrl, id); 2633 kfree(id); 2634 2635 if (ret < 0) 2636 return ret; 2637 2638 if (ctrl->apst_enabled && !prev_apst_enabled) 2639 dev_pm_qos_expose_latency_tolerance(ctrl->device); 2640 else if (!ctrl->apst_enabled && prev_apst_enabled) 2641 dev_pm_qos_hide_latency_tolerance(ctrl->device); 2642 2643 ret = nvme_configure_apst(ctrl); 2644 if (ret < 0) 2645 return ret; 2646 2647 ret = nvme_configure_timestamp(ctrl); 2648 if (ret < 0) 2649 return ret; 2650 2651 ret = nvme_configure_directives(ctrl); 2652 if (ret < 0) 2653 return ret; 2654 2655 ret = nvme_configure_acre(ctrl); 2656 if (ret < 0) 2657 return ret; 2658 2659 ctrl->identified = true; 2660 2661 return 0; 2662 2663 out_free: 2664 kfree(id); 2665 return ret; 2666 } 2667 EXPORT_SYMBOL_GPL(nvme_init_identify); 2668 2669 static int nvme_dev_open(struct inode *inode, struct file *file) 2670 { 2671 struct nvme_ctrl *ctrl = 2672 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 2673 2674 switch (ctrl->state) { 2675 case NVME_CTRL_LIVE: 2676 case NVME_CTRL_ADMIN_ONLY: 2677 break; 2678 default: 2679 return -EWOULDBLOCK; 2680 } 2681 2682 file->private_data = ctrl; 2683 return 0; 2684 } 2685 2686 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) 2687 { 2688 struct nvme_ns *ns; 2689 int ret; 2690 2691 down_read(&ctrl->namespaces_rwsem); 2692 if (list_empty(&ctrl->namespaces)) { 2693 ret = -ENOTTY; 2694 goto out_unlock; 2695 } 2696 2697 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); 2698 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { 2699 dev_warn(ctrl->device, 2700 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); 2701 ret = -EINVAL; 2702 goto out_unlock; 2703 } 2704 2705 dev_warn(ctrl->device, 2706 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); 2707 kref_get(&ns->kref); 2708 up_read(&ctrl->namespaces_rwsem); 2709 2710 ret = nvme_user_cmd(ctrl, ns, argp); 2711 nvme_put_ns(ns); 2712 return ret; 2713 2714 out_unlock: 2715 up_read(&ctrl->namespaces_rwsem); 2716 return ret; 2717 } 2718 2719 static long nvme_dev_ioctl(struct file *file, unsigned int cmd, 2720 unsigned long arg) 2721 { 2722 struct nvme_ctrl *ctrl = file->private_data; 2723 void __user *argp = (void __user *)arg; 2724 2725 switch (cmd) { 2726 case NVME_IOCTL_ADMIN_CMD: 2727 return nvme_user_cmd(ctrl, NULL, argp); 2728 case NVME_IOCTL_IO_CMD: 2729 return nvme_dev_user_cmd(ctrl, argp); 2730 case NVME_IOCTL_RESET: 2731 dev_warn(ctrl->device, "resetting controller\n"); 2732 return nvme_reset_ctrl_sync(ctrl); 2733 case NVME_IOCTL_SUBSYS_RESET: 2734 return nvme_reset_subsystem(ctrl); 2735 case 
NVME_IOCTL_RESCAN: 2736 nvme_queue_scan(ctrl); 2737 return 0; 2738 default: 2739 return -ENOTTY; 2740 } 2741 } 2742 2743 static const struct file_operations nvme_dev_fops = { 2744 .owner = THIS_MODULE, 2745 .open = nvme_dev_open, 2746 .unlocked_ioctl = nvme_dev_ioctl, 2747 .compat_ioctl = nvme_dev_ioctl, 2748 }; 2749 2750 static ssize_t nvme_sysfs_reset(struct device *dev, 2751 struct device_attribute *attr, const char *buf, 2752 size_t count) 2753 { 2754 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2755 int ret; 2756 2757 ret = nvme_reset_ctrl_sync(ctrl); 2758 if (ret < 0) 2759 return ret; 2760 return count; 2761 } 2762 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 2763 2764 static ssize_t nvme_sysfs_rescan(struct device *dev, 2765 struct device_attribute *attr, const char *buf, 2766 size_t count) 2767 { 2768 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2769 2770 nvme_queue_scan(ctrl); 2771 return count; 2772 } 2773 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 2774 2775 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 2776 { 2777 struct gendisk *disk = dev_to_disk(dev); 2778 2779 if (disk->fops == &nvme_fops) 2780 return nvme_get_ns_from_dev(dev)->head; 2781 else 2782 return disk->private_data; 2783 } 2784 2785 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 2786 char *buf) 2787 { 2788 struct nvme_ns_head *head = dev_to_ns_head(dev); 2789 struct nvme_ns_ids *ids = &head->ids; 2790 struct nvme_subsystem *subsys = head->subsys; 2791 int serial_len = sizeof(subsys->serial); 2792 int model_len = sizeof(subsys->model); 2793 2794 if (!uuid_is_null(&ids->uuid)) 2795 return sprintf(buf, "uuid.%pU\n", &ids->uuid); 2796 2797 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2798 return sprintf(buf, "eui.%16phN\n", ids->nguid); 2799 2800 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 2801 return sprintf(buf, "eui.%8phN\n", ids->eui64); 2802 2803 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 2804 subsys->serial[serial_len - 1] == '\0')) 2805 serial_len--; 2806 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 2807 subsys->model[model_len - 1] == '\0')) 2808 model_len--; 2809 2810 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 2811 serial_len, subsys->serial, model_len, subsys->model, 2812 head->ns_id); 2813 } 2814 static DEVICE_ATTR_RO(wwid); 2815 2816 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 2817 char *buf) 2818 { 2819 return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 2820 } 2821 static DEVICE_ATTR_RO(nguid); 2822 2823 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 2824 char *buf) 2825 { 2826 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 2827 2828 /* For backward compatibility expose the NGUID to userspace if 2829 * we have no UUID set 2830 */ 2831 if (uuid_is_null(&ids->uuid)) { 2832 printk_ratelimited(KERN_WARNING 2833 "No UUID available providing old NGUID\n"); 2834 return sprintf(buf, "%pU\n", ids->nguid); 2835 } 2836 return sprintf(buf, "%pU\n", &ids->uuid); 2837 } 2838 static DEVICE_ATTR_RO(uuid); 2839 2840 static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 2841 char *buf) 2842 { 2843 return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 2844 } 2845 static DEVICE_ATTR_RO(eui); 2846 2847 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 2848 char *buf) 2849 { 2850 return sprintf(buf, 
"%d\n", dev_to_ns_head(dev)->ns_id); 2851 } 2852 static DEVICE_ATTR_RO(nsid); 2853 2854 static struct attribute *nvme_ns_id_attrs[] = { 2855 &dev_attr_wwid.attr, 2856 &dev_attr_uuid.attr, 2857 &dev_attr_nguid.attr, 2858 &dev_attr_eui.attr, 2859 &dev_attr_nsid.attr, 2860 #ifdef CONFIG_NVME_MULTIPATH 2861 &dev_attr_ana_grpid.attr, 2862 &dev_attr_ana_state.attr, 2863 #endif 2864 NULL, 2865 }; 2866 2867 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 2868 struct attribute *a, int n) 2869 { 2870 struct device *dev = container_of(kobj, struct device, kobj); 2871 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 2872 2873 if (a == &dev_attr_uuid.attr) { 2874 if (uuid_is_null(&ids->uuid) && 2875 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2876 return 0; 2877 } 2878 if (a == &dev_attr_nguid.attr) { 2879 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2880 return 0; 2881 } 2882 if (a == &dev_attr_eui.attr) { 2883 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 2884 return 0; 2885 } 2886 #ifdef CONFIG_NVME_MULTIPATH 2887 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { 2888 if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */ 2889 return 0; 2890 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) 2891 return 0; 2892 } 2893 #endif 2894 return a->mode; 2895 } 2896 2897 static const struct attribute_group nvme_ns_id_attr_group = { 2898 .attrs = nvme_ns_id_attrs, 2899 .is_visible = nvme_ns_id_attrs_are_visible, 2900 }; 2901 2902 const struct attribute_group *nvme_ns_id_attr_groups[] = { 2903 &nvme_ns_id_attr_group, 2904 #ifdef CONFIG_NVM 2905 &nvme_nvm_attr_group, 2906 #endif 2907 NULL, 2908 }; 2909 2910 #define nvme_show_str_function(field) \ 2911 static ssize_t field##_show(struct device *dev, \ 2912 struct device_attribute *attr, char *buf) \ 2913 { \ 2914 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 2915 return sprintf(buf, "%.*s\n", \ 2916 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 2917 } \ 2918 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 2919 2920 nvme_show_str_function(model); 2921 nvme_show_str_function(serial); 2922 nvme_show_str_function(firmware_rev); 2923 2924 #define nvme_show_int_function(field) \ 2925 static ssize_t field##_show(struct device *dev, \ 2926 struct device_attribute *attr, char *buf) \ 2927 { \ 2928 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 2929 return sprintf(buf, "%d\n", ctrl->field); \ 2930 } \ 2931 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 2932 2933 nvme_show_int_function(cntlid); 2934 nvme_show_int_function(numa_node); 2935 2936 static ssize_t nvme_sysfs_delete(struct device *dev, 2937 struct device_attribute *attr, const char *buf, 2938 size_t count) 2939 { 2940 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2941 2942 if (device_remove_file_self(dev, attr)) 2943 nvme_delete_ctrl_sync(ctrl); 2944 return count; 2945 } 2946 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 2947 2948 static ssize_t nvme_sysfs_show_transport(struct device *dev, 2949 struct device_attribute *attr, 2950 char *buf) 2951 { 2952 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2953 2954 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); 2955 } 2956 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 2957 2958 static ssize_t nvme_sysfs_show_state(struct device *dev, 2959 struct device_attribute *attr, 2960 char *buf) 2961 { 2962 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2963 static const char *const state_name[] = { 2964 
[NVME_CTRL_NEW] = "new", 2965 [NVME_CTRL_LIVE] = "live", 2966 [NVME_CTRL_ADMIN_ONLY] = "only-admin", 2967 [NVME_CTRL_RESETTING] = "resetting", 2968 [NVME_CTRL_CONNECTING] = "connecting", 2969 [NVME_CTRL_DELETING] = "deleting", 2970 [NVME_CTRL_DEAD] = "dead", 2971 }; 2972 2973 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 2974 state_name[ctrl->state]) 2975 return sprintf(buf, "%s\n", state_name[ctrl->state]); 2976 2977 return sprintf(buf, "unknown state\n"); 2978 } 2979 2980 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 2981 2982 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 2983 struct device_attribute *attr, 2984 char *buf) 2985 { 2986 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2987 2988 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); 2989 } 2990 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 2991 2992 static ssize_t nvme_sysfs_show_address(struct device *dev, 2993 struct device_attribute *attr, 2994 char *buf) 2995 { 2996 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2997 2998 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 2999 } 3000 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 3001 3002 static struct attribute *nvme_dev_attrs[] = { 3003 &dev_attr_reset_controller.attr, 3004 &dev_attr_rescan_controller.attr, 3005 &dev_attr_model.attr, 3006 &dev_attr_serial.attr, 3007 &dev_attr_firmware_rev.attr, 3008 &dev_attr_cntlid.attr, 3009 &dev_attr_delete_controller.attr, 3010 &dev_attr_transport.attr, 3011 &dev_attr_subsysnqn.attr, 3012 &dev_attr_address.attr, 3013 &dev_attr_state.attr, 3014 &dev_attr_numa_node.attr, 3015 NULL 3016 }; 3017 3018 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 3019 struct attribute *a, int n) 3020 { 3021 struct device *dev = container_of(kobj, struct device, kobj); 3022 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3023 3024 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 3025 return 0; 3026 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 3027 return 0; 3028 3029 return a->mode; 3030 } 3031 3032 static struct attribute_group nvme_dev_attrs_group = { 3033 .attrs = nvme_dev_attrs, 3034 .is_visible = nvme_dev_attrs_are_visible, 3035 }; 3036 3037 static const struct attribute_group *nvme_dev_attr_groups[] = { 3038 &nvme_dev_attrs_group, 3039 NULL, 3040 }; 3041 3042 static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys, 3043 unsigned nsid) 3044 { 3045 struct nvme_ns_head *h; 3046 3047 lockdep_assert_held(&subsys->lock); 3048 3049 list_for_each_entry(h, &subsys->nsheads, entry) { 3050 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref)) 3051 return h; 3052 } 3053 3054 return NULL; 3055 } 3056 3057 static int __nvme_check_ids(struct nvme_subsystem *subsys, 3058 struct nvme_ns_head *new) 3059 { 3060 struct nvme_ns_head *h; 3061 3062 lockdep_assert_held(&subsys->lock); 3063 3064 list_for_each_entry(h, &subsys->nsheads, entry) { 3065 if (nvme_ns_ids_valid(&new->ids) && 3066 !list_empty(&h->list) && 3067 nvme_ns_ids_equal(&new->ids, &h->ids)) 3068 return -EINVAL; 3069 } 3070 3071 return 0; 3072 } 3073 3074 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 3075 unsigned nsid, struct nvme_id_ns *id) 3076 { 3077 struct nvme_ns_head *head; 3078 size_t size = sizeof(*head); 3079 int ret = -ENOMEM; 3080 3081 #ifdef CONFIG_NVME_MULTIPATH 3082 size += num_possible_nodes() * sizeof(struct nvme_ns *); 3083 #endif 3084 3085 head = kzalloc(size, GFP_KERNEL); 3086 if (!head) 
3087 goto out; 3088 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); 3089 if (ret < 0) 3090 goto out_free_head; 3091 head->instance = ret; 3092 INIT_LIST_HEAD(&head->list); 3093 ret = init_srcu_struct(&head->srcu); 3094 if (ret) 3095 goto out_ida_remove; 3096 head->subsys = ctrl->subsys; 3097 head->ns_id = nsid; 3098 kref_init(&head->ref); 3099 3100 nvme_report_ns_ids(ctrl, nsid, id, &head->ids); 3101 3102 ret = __nvme_check_ids(ctrl->subsys, head); 3103 if (ret) { 3104 dev_err(ctrl->device, 3105 "duplicate IDs for nsid %d\n", nsid); 3106 goto out_cleanup_srcu; 3107 } 3108 3109 ret = nvme_mpath_alloc_disk(ctrl, head); 3110 if (ret) 3111 goto out_cleanup_srcu; 3112 3113 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 3114 3115 kref_get(&ctrl->subsys->ref); 3116 3117 return head; 3118 out_cleanup_srcu: 3119 cleanup_srcu_struct(&head->srcu); 3120 out_ida_remove: 3121 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); 3122 out_free_head: 3123 kfree(head); 3124 out: 3125 return ERR_PTR(ret); 3126 } 3127 3128 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, 3129 struct nvme_id_ns *id) 3130 { 3131 struct nvme_ctrl *ctrl = ns->ctrl; 3132 bool is_shared = id->nmic & (1 << 0); 3133 struct nvme_ns_head *head = NULL; 3134 int ret = 0; 3135 3136 mutex_lock(&ctrl->subsys->lock); 3137 if (is_shared) 3138 head = __nvme_find_ns_head(ctrl->subsys, nsid); 3139 if (!head) { 3140 head = nvme_alloc_ns_head(ctrl, nsid, id); 3141 if (IS_ERR(head)) { 3142 ret = PTR_ERR(head); 3143 goto out_unlock; 3144 } 3145 } else { 3146 struct nvme_ns_ids ids; 3147 3148 nvme_report_ns_ids(ctrl, nsid, id, &ids); 3149 if (!nvme_ns_ids_equal(&head->ids, &ids)) { 3150 dev_err(ctrl->device, 3151 "IDs don't match for shared namespace %d\n", 3152 nsid); 3153 ret = -EINVAL; 3154 goto out_unlock; 3155 } 3156 } 3157 3158 list_add_tail(&ns->siblings, &head->list); 3159 ns->head = head; 3160 3161 out_unlock: 3162 mutex_unlock(&ctrl->subsys->lock); 3163 return ret; 3164 } 3165 3166 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) 3167 { 3168 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); 3169 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); 3170 3171 return nsa->head->ns_id - nsb->head->ns_id; 3172 } 3173 3174 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3175 { 3176 struct nvme_ns *ns, *ret = NULL; 3177 3178 down_read(&ctrl->namespaces_rwsem); 3179 list_for_each_entry(ns, &ctrl->namespaces, list) { 3180 if (ns->head->ns_id == nsid) { 3181 if (!kref_get_unless_zero(&ns->kref)) 3182 continue; 3183 ret = ns; 3184 break; 3185 } 3186 if (ns->head->ns_id > nsid) 3187 break; 3188 } 3189 up_read(&ctrl->namespaces_rwsem); 3190 return ret; 3191 } 3192 3193 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns) 3194 { 3195 struct streams_directive_params s; 3196 int ret; 3197 3198 if (!ctrl->nr_streams) 3199 return 0; 3200 3201 ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); 3202 if (ret) 3203 return ret; 3204 3205 ns->sws = le32_to_cpu(s.sws); 3206 ns->sgs = le16_to_cpu(s.sgs); 3207 3208 if (ns->sws) { 3209 unsigned int bs = 1 << ns->lba_shift; 3210 3211 blk_queue_io_min(ns->queue, bs * ns->sws); 3212 if (ns->sgs) 3213 blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs); 3214 } 3215 3216 return 0; 3217 } 3218 3219 static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3220 { 3221 struct nvme_ns *ns; 3222 struct gendisk *disk; 3223 struct nvme_id_ns *id; 3224 char disk_name[DISK_NAME_LEN]; 
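	/*
	 * Keep the per-namespace allocations on the controller's NUMA node so
	 * the ns structure and gendisk stay local to the device's I/O path.
	 */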
3225 int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret; 3226 3227 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3228 if (!ns) 3229 return -ENOMEM; 3230 3231 ns->queue = blk_mq_init_queue(ctrl->tagset); 3232 if (IS_ERR(ns->queue)) { 3233 ret = PTR_ERR(ns->queue); 3234 goto out_free_ns; 3235 } 3236 3237 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 3238 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA) 3239 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 3240 3241 ns->queue->queuedata = ns; 3242 ns->ctrl = ctrl; 3243 3244 kref_init(&ns->kref); 3245 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ 3246 3247 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 3248 nvme_set_queue_limits(ctrl, ns->queue); 3249 3250 id = nvme_identify_ns(ctrl, nsid); 3251 if (!id) { 3252 ret = -EIO; 3253 goto out_free_queue; 3254 } 3255 3256 if (id->ncap == 0) { 3257 ret = -EINVAL; 3258 goto out_free_id; 3259 } 3260 3261 ret = nvme_init_ns_head(ns, nsid, id); 3262 if (ret) 3263 goto out_free_id; 3264 nvme_setup_streams_ns(ctrl, ns); 3265 nvme_set_disk_name(disk_name, ns, ctrl, &flags); 3266 3267 disk = alloc_disk_node(0, node); 3268 if (!disk) { 3269 ret = -ENOMEM; 3270 goto out_unlink_ns; 3271 } 3272 3273 disk->fops = &nvme_fops; 3274 disk->private_data = ns; 3275 disk->queue = ns->queue; 3276 disk->flags = flags; 3277 memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); 3278 ns->disk = disk; 3279 3280 __nvme_revalidate_disk(disk, id); 3281 3282 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { 3283 ret = nvme_nvm_register(ns, disk_name, node); 3284 if (ret) { 3285 dev_warn(ctrl->device, "LightNVM init failure\n"); 3286 goto out_put_disk; 3287 } 3288 } 3289 3290 down_write(&ctrl->namespaces_rwsem); 3291 list_add_tail(&ns->list, &ctrl->namespaces); 3292 up_write(&ctrl->namespaces_rwsem); 3293 3294 nvme_get_ctrl(ctrl); 3295 3296 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups); 3297 3298 nvme_mpath_add_disk(ns, id); 3299 nvme_fault_inject_init(ns); 3300 kfree(id); 3301 3302 return 0; 3303 out_put_disk: 3304 put_disk(ns->disk); 3305 out_unlink_ns: 3306 mutex_lock(&ctrl->subsys->lock); 3307 list_del_rcu(&ns->siblings); 3308 mutex_unlock(&ctrl->subsys->lock); 3309 nvme_put_ns_head(ns->head); 3310 out_free_id: 3311 kfree(id); 3312 out_free_queue: 3313 blk_cleanup_queue(ns->queue); 3314 out_free_ns: 3315 kfree(ns); 3316 return ret; 3317 } 3318 3319 static void nvme_ns_remove(struct nvme_ns *ns) 3320 { 3321 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 3322 return; 3323 3324 nvme_fault_inject_fini(ns); 3325 if (ns->disk && ns->disk->flags & GENHD_FL_UP) { 3326 del_gendisk(ns->disk); 3327 blk_cleanup_queue(ns->queue); 3328 if (blk_get_integrity(ns->disk)) 3329 blk_integrity_unregister(ns->disk); 3330 } 3331 3332 mutex_lock(&ns->ctrl->subsys->lock); 3333 list_del_rcu(&ns->siblings); 3334 nvme_mpath_clear_current_path(ns); 3335 mutex_unlock(&ns->ctrl->subsys->lock); 3336 3337 down_write(&ns->ctrl->namespaces_rwsem); 3338 list_del_init(&ns->list); 3339 up_write(&ns->ctrl->namespaces_rwsem); 3340 3341 synchronize_srcu(&ns->head->srcu); 3342 nvme_mpath_check_last_path(ns); 3343 nvme_put_ns(ns); 3344 } 3345 3346 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3347 { 3348 struct nvme_ns *ns; 3349 3350 ns = nvme_find_get_ns(ctrl, nsid); 3351 if (ns) { 3352 if (ns->disk && revalidate_disk(ns->disk)) 3353 nvme_ns_remove(ns); 3354 nvme_put_ns(ns); 3355 } else 3356 nvme_alloc_ns(ctrl, nsid); 3357 } 3358 3359 static void 
nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3360 unsigned nsid) 3361 { 3362 struct nvme_ns *ns, *next; 3363 LIST_HEAD(rm_list); 3364 3365 down_write(&ctrl->namespaces_rwsem); 3366 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3367 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) 3368 list_move_tail(&ns->list, &rm_list); 3369 } 3370 up_write(&ctrl->namespaces_rwsem); 3371 3372 list_for_each_entry_safe(ns, next, &rm_list, list) 3373 nvme_ns_remove(ns); 3374 3375 } 3376 3377 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) 3378 { 3379 struct nvme_ns *ns; 3380 __le32 *ns_list; 3381 unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); 3382 int ret = 0; 3383 3384 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 3385 if (!ns_list) 3386 return -ENOMEM; 3387 3388 for (i = 0; i < num_lists; i++) { 3389 ret = nvme_identify_ns_list(ctrl, prev, ns_list); 3390 if (ret) 3391 goto free; 3392 3393 for (j = 0; j < min(nn, 1024U); j++) { 3394 nsid = le32_to_cpu(ns_list[j]); 3395 if (!nsid) 3396 goto out; 3397 3398 nvme_validate_ns(ctrl, nsid); 3399 3400 while (++prev < nsid) { 3401 ns = nvme_find_get_ns(ctrl, prev); 3402 if (ns) { 3403 nvme_ns_remove(ns); 3404 nvme_put_ns(ns); 3405 } 3406 } 3407 } 3408 nn -= j; 3409 } 3410 out: 3411 nvme_remove_invalid_namespaces(ctrl, prev); 3412 free: 3413 kfree(ns_list); 3414 return ret; 3415 } 3416 3417 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) 3418 { 3419 unsigned i; 3420 3421 for (i = 1; i <= nn; i++) 3422 nvme_validate_ns(ctrl, i); 3423 3424 nvme_remove_invalid_namespaces(ctrl, nn); 3425 } 3426 3427 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 3428 { 3429 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 3430 __le32 *log; 3431 int error; 3432 3433 log = kzalloc(log_size, GFP_KERNEL); 3434 if (!log) 3435 return; 3436 3437 /* 3438 * We need to read the log to clear the AEN, but we don't want to rely 3439 * on it for the changed namespace information as userspace could have 3440 * raced with us in reading the log page, which could cause us to miss 3441 * updates. 3442 */ 3443 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log, 3444 log_size, 0); 3445 if (error) 3446 dev_warn(ctrl->device, 3447 "reading changed ns log failed: %d\n", error); 3448 3449 kfree(log); 3450 } 3451 3452 static void nvme_scan_work(struct work_struct *work) 3453 { 3454 struct nvme_ctrl *ctrl = 3455 container_of(work, struct nvme_ctrl, scan_work); 3456 struct nvme_id_ctrl *id; 3457 unsigned nn; 3458 3459 if (ctrl->state != NVME_CTRL_LIVE) 3460 return; 3461 3462 WARN_ON_ONCE(!ctrl->tagset); 3463 3464 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 3465 dev_info(ctrl->device, "rescanning namespaces.\n"); 3466 nvme_clear_changed_ns_log(ctrl); 3467 } 3468 3469 if (nvme_identify_ctrl(ctrl, &id)) 3470 return; 3471 3472 mutex_lock(&ctrl->scan_lock); 3473 nn = le32_to_cpu(id->nn); 3474 if (ctrl->vs >= NVME_VS(1, 1, 0) && 3475 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { 3476 if (!nvme_scan_ns_list(ctrl, nn)) 3477 goto out_free_id; 3478 } 3479 nvme_scan_ns_sequential(ctrl, nn); 3480 out_free_id: 3481 mutex_unlock(&ctrl->scan_lock); 3482 kfree(id); 3483 down_write(&ctrl->namespaces_rwsem); 3484 list_sort(NULL, &ctrl->namespaces, ns_cmp); 3485 up_write(&ctrl->namespaces_rwsem); 3486 } 3487 3488 /* 3489 * This function iterates the namespace list unlocked to allow recovery from 3490 * controller failure. 
It is up to the caller to ensure the namespace list is 3491 * not modified by scan work while this function is executing. 3492 */ 3493 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 3494 { 3495 struct nvme_ns *ns, *next; 3496 LIST_HEAD(ns_list); 3497 3498 /* prevent racing with ns scanning */ 3499 flush_work(&ctrl->scan_work); 3500 3501 /* 3502 * The dead state indicates that the controller was not gracefully 3503 * disconnected. In that case, we won't be able to flush any data while 3504 * removing the namespaces' disks; fail all the queues now to avoid 3505 * potentially having to clean up the failed sync later. 3506 */ 3507 if (ctrl->state == NVME_CTRL_DEAD) 3508 nvme_kill_queues(ctrl); 3509 3510 down_write(&ctrl->namespaces_rwsem); 3511 list_splice_init(&ctrl->namespaces, &ns_list); 3512 up_write(&ctrl->namespaces_rwsem); 3513 3514 list_for_each_entry_safe(ns, next, &ns_list, list) 3515 nvme_ns_remove(ns); 3516 } 3517 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 3518 3519 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 3520 { 3521 char *envp[2] = { NULL, NULL }; 3522 u32 aen_result = ctrl->aen_result; 3523 3524 ctrl->aen_result = 0; 3525 if (!aen_result) 3526 return; 3527 3528 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 3529 if (!envp[0]) 3530 return; 3531 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 3532 kfree(envp[0]); 3533 } 3534 3535 static void nvme_async_event_work(struct work_struct *work) 3536 { 3537 struct nvme_ctrl *ctrl = 3538 container_of(work, struct nvme_ctrl, async_event_work); 3539 3540 nvme_aen_uevent(ctrl); 3541 ctrl->ops->submit_async_event(ctrl); 3542 } 3543 3544 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 3545 { 3546 3547 u32 csts; 3548 3549 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 3550 return false; 3551 3552 if (csts == ~0) 3553 return false; 3554 3555 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 3556 } 3557 3558 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 3559 { 3560 struct nvme_fw_slot_info_log *log; 3561 3562 log = kmalloc(sizeof(*log), GFP_KERNEL); 3563 if (!log) 3564 return; 3565 3566 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log, 3567 sizeof(*log), 0)) 3568 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 3569 kfree(log); 3570 } 3571 3572 static void nvme_fw_act_work(struct work_struct *work) 3573 { 3574 struct nvme_ctrl *ctrl = container_of(work, 3575 struct nvme_ctrl, fw_act_work); 3576 unsigned long fw_act_timeout; 3577 3578 if (ctrl->mtfa) 3579 fw_act_timeout = jiffies + 3580 msecs_to_jiffies(ctrl->mtfa * 100); 3581 else 3582 fw_act_timeout = jiffies + 3583 msecs_to_jiffies(admin_timeout * 1000); 3584 3585 nvme_stop_queues(ctrl); 3586 while (nvme_ctrl_pp_status(ctrl)) { 3587 if (time_after(jiffies, fw_act_timeout)) { 3588 dev_warn(ctrl->device, 3589 "Fw activation timeout, reset controller\n"); 3590 nvme_reset_ctrl(ctrl); 3591 break; 3592 } 3593 msleep(100); 3594 } 3595 3596 if (ctrl->state != NVME_CTRL_LIVE) 3597 return; 3598 3599 nvme_start_queues(ctrl); 3600 /* read FW slot information to clear the AER */ 3601 nvme_get_fw_slot_info(ctrl); 3602 } 3603 3604 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 3605 { 3606 u32 aer_notice_type = (result & 0xff00) >> 8; 3607 3608 switch (aer_notice_type) { 3609 case NVME_AER_NOTICE_NS_CHANGED: 3610 trace_nvme_async_event(ctrl, aer_notice_type); 3611 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 3612 nvme_queue_scan(ctrl); 3613 break; 3614 case
NVME_AER_NOTICE_FW_ACT_STARTING: 3615 trace_nvme_async_event(ctrl, aer_notice_type); 3616 queue_work(nvme_wq, &ctrl->fw_act_work); 3617 break; 3618 #ifdef CONFIG_NVME_MULTIPATH 3619 case NVME_AER_NOTICE_ANA: 3620 trace_nvme_async_event(ctrl, aer_notice_type); 3621 if (!ctrl->ana_log_buf) 3622 break; 3623 queue_work(nvme_wq, &ctrl->ana_work); 3624 break; 3625 #endif 3626 default: 3627 dev_warn(ctrl->device, "async event result %08x\n", result); 3628 } 3629 } 3630 3631 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 3632 volatile union nvme_result *res) 3633 { 3634 u32 result = le32_to_cpu(res->u32); 3635 u32 aer_type = result & 0x07; 3636 3637 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 3638 return; 3639 3640 switch (aer_type) { 3641 case NVME_AER_NOTICE: 3642 nvme_handle_aen_notice(ctrl, result); 3643 break; 3644 case NVME_AER_ERROR: 3645 case NVME_AER_SMART: 3646 case NVME_AER_CSS: 3647 case NVME_AER_VS: 3648 trace_nvme_async_event(ctrl, aer_type); 3649 ctrl->aen_result = result; 3650 break; 3651 default: 3652 break; 3653 } 3654 queue_work(nvme_wq, &ctrl->async_event_work); 3655 } 3656 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 3657 3658 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 3659 { 3660 nvme_mpath_stop(ctrl); 3661 nvme_stop_keep_alive(ctrl); 3662 flush_work(&ctrl->async_event_work); 3663 cancel_work_sync(&ctrl->fw_act_work); 3664 } 3665 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 3666 3667 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 3668 { 3669 if (ctrl->kato) 3670 nvme_start_keep_alive(ctrl); 3671 3672 if (ctrl->queue_count > 1) { 3673 nvme_queue_scan(ctrl); 3674 nvme_enable_aen(ctrl); 3675 queue_work(nvme_wq, &ctrl->async_event_work); 3676 nvme_start_queues(ctrl); 3677 } 3678 } 3679 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 3680 3681 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 3682 { 3683 cdev_device_del(&ctrl->cdev, ctrl->device); 3684 } 3685 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 3686 3687 static void nvme_free_ctrl(struct device *dev) 3688 { 3689 struct nvme_ctrl *ctrl = 3690 container_of(dev, struct nvme_ctrl, ctrl_device); 3691 struct nvme_subsystem *subsys = ctrl->subsys; 3692 3693 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3694 kfree(ctrl->effects); 3695 nvme_mpath_uninit(ctrl); 3696 __free_page(ctrl->discard_page); 3697 3698 if (subsys) { 3699 mutex_lock(&subsys->lock); 3700 list_del(&ctrl->subsys_entry); 3701 mutex_unlock(&subsys->lock); 3702 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 3703 } 3704 3705 ctrl->ops->free_ctrl(ctrl); 3706 3707 if (subsys) 3708 nvme_put_subsystem(subsys); 3709 } 3710 3711 /* 3712 * Initialize an NVMe controller structure. This needs to be called during 3713 * the earliest initialization so that we have the initialized structure 3714 * around during probing.
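 * Transport drivers are expected to pair this with nvme_uninit_ctrl() and a
 * final nvme_put_ctrl() when the controller is torn down.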
3715 */ 3716 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 3717 const struct nvme_ctrl_ops *ops, unsigned long quirks) 3718 { 3719 int ret; 3720 3721 ctrl->state = NVME_CTRL_NEW; 3722 spin_lock_init(&ctrl->lock); 3723 mutex_init(&ctrl->scan_lock); 3724 INIT_LIST_HEAD(&ctrl->namespaces); 3725 init_rwsem(&ctrl->namespaces_rwsem); 3726 ctrl->dev = dev; 3727 ctrl->ops = ops; 3728 ctrl->quirks = quirks; 3729 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 3730 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 3731 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 3732 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 3733 3734 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 3735 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 3736 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 3737 3738 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > 3739 PAGE_SIZE); 3740 ctrl->discard_page = alloc_page(GFP_KERNEL); 3741 if (!ctrl->discard_page) { 3742 ret = -ENOMEM; 3743 goto out; 3744 } 3745 3746 ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); 3747 if (ret < 0) 3748 goto out; 3749 ctrl->instance = ret; 3750 3751 device_initialize(&ctrl->ctrl_device); 3752 ctrl->device = &ctrl->ctrl_device; 3753 ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance); 3754 ctrl->device->class = nvme_class; 3755 ctrl->device->parent = ctrl->dev; 3756 ctrl->device->groups = nvme_dev_attr_groups; 3757 ctrl->device->release = nvme_free_ctrl; 3758 dev_set_drvdata(ctrl->device, ctrl); 3759 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 3760 if (ret) 3761 goto out_release_instance; 3762 3763 cdev_init(&ctrl->cdev, &nvme_dev_fops); 3764 ctrl->cdev.owner = ops->module; 3765 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 3766 if (ret) 3767 goto out_free_name; 3768 3769 /* 3770 * Initialize latency tolerance controls. The sysfs files won't 3771 * be visible to userspace unless the device actually supports APST. 3772 */ 3773 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 3774 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 3775 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 3776 3777 return 0; 3778 out_free_name: 3779 kfree_const(ctrl->device->kobj.name); 3780 out_release_instance: 3781 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3782 out: 3783 if (ctrl->discard_page) 3784 __free_page(ctrl->discard_page); 3785 return ret; 3786 } 3787 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 3788 3789 /** 3790 * nvme_kill_queues(): Ends all namespace queues 3791 * @ctrl: the dead controller that needs to end 3792 * 3793 * Call this function when the driver determines it is unable to get the 3794 * controller in a state capable of servicing IO. 
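 * It marks every namespace request queue dying and forcibly unquiesces it so
 * that pending and future I/O fails fast instead of blocking on a dead
 * controller; the admin queue is unquiesced as well.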
3795 */ 3796 void nvme_kill_queues(struct nvme_ctrl *ctrl) 3797 { 3798 struct nvme_ns *ns; 3799 3800 down_read(&ctrl->namespaces_rwsem); 3801 3802 /* Forcibly unquiesce queues to avoid blocking dispatch */ 3803 if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q)) 3804 blk_mq_unquiesce_queue(ctrl->admin_q); 3805 3806 list_for_each_entry(ns, &ctrl->namespaces, list) 3807 nvme_set_queue_dying(ns); 3808 3809 up_read(&ctrl->namespaces_rwsem); 3810 } 3811 EXPORT_SYMBOL_GPL(nvme_kill_queues); 3812 3813 void nvme_unfreeze(struct nvme_ctrl *ctrl) 3814 { 3815 struct nvme_ns *ns; 3816 3817 down_read(&ctrl->namespaces_rwsem); 3818 list_for_each_entry(ns, &ctrl->namespaces, list) 3819 blk_mq_unfreeze_queue(ns->queue); 3820 up_read(&ctrl->namespaces_rwsem); 3821 } 3822 EXPORT_SYMBOL_GPL(nvme_unfreeze); 3823 3824 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 3825 { 3826 struct nvme_ns *ns; 3827 3828 down_read(&ctrl->namespaces_rwsem); 3829 list_for_each_entry(ns, &ctrl->namespaces, list) { 3830 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 3831 if (timeout <= 0) 3832 break; 3833 } 3834 up_read(&ctrl->namespaces_rwsem); 3835 } 3836 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 3837 3838 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 3839 { 3840 struct nvme_ns *ns; 3841 3842 down_read(&ctrl->namespaces_rwsem); 3843 list_for_each_entry(ns, &ctrl->namespaces, list) 3844 blk_mq_freeze_queue_wait(ns->queue); 3845 up_read(&ctrl->namespaces_rwsem); 3846 } 3847 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 3848 3849 void nvme_start_freeze(struct nvme_ctrl *ctrl) 3850 { 3851 struct nvme_ns *ns; 3852 3853 down_read(&ctrl->namespaces_rwsem); 3854 list_for_each_entry(ns, &ctrl->namespaces, list) 3855 blk_freeze_queue_start(ns->queue); 3856 up_read(&ctrl->namespaces_rwsem); 3857 } 3858 EXPORT_SYMBOL_GPL(nvme_start_freeze); 3859 3860 void nvme_stop_queues(struct nvme_ctrl *ctrl) 3861 { 3862 struct nvme_ns *ns; 3863 3864 down_read(&ctrl->namespaces_rwsem); 3865 list_for_each_entry(ns, &ctrl->namespaces, list) 3866 blk_mq_quiesce_queue(ns->queue); 3867 up_read(&ctrl->namespaces_rwsem); 3868 } 3869 EXPORT_SYMBOL_GPL(nvme_stop_queues); 3870 3871 void nvme_start_queues(struct nvme_ctrl *ctrl) 3872 { 3873 struct nvme_ns *ns; 3874 3875 down_read(&ctrl->namespaces_rwsem); 3876 list_for_each_entry(ns, &ctrl->namespaces, list) 3877 blk_mq_unquiesce_queue(ns->queue); 3878 up_read(&ctrl->namespaces_rwsem); 3879 } 3880 EXPORT_SYMBOL_GPL(nvme_start_queues); 3881 3882 /* 3883 * Check we didn't inadvertently grow the command structure sizes: 3884 */ 3885 static inline void _nvme_check_size(void) 3886 { 3887 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); 3888 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 3889 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); 3890 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 3891 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); 3892 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 3893 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); 3894 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); 3895 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 3896 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); 3897 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 3898 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 3899 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 3900 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 3901 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 3902 
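	/*
	 * An NVMe submission queue entry is 64 bytes, so the command-structure
	 * checks here turn any accidental growth into a compile-time error;
	 * the identify and log structures are likewise pinned to their
	 * spec-defined sizes.
	 */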
BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 3903 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); 3904 } 3905 3906 3907 static int __init nvme_core_init(void) 3908 { 3909 int result = -ENOMEM; 3910 3911 _nvme_check_size(); 3912 3913 nvme_wq = alloc_workqueue("nvme-wq", 3914 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 3915 if (!nvme_wq) 3916 goto out; 3917 3918 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", 3919 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 3920 if (!nvme_reset_wq) 3921 goto destroy_wq; 3922 3923 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", 3924 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 3925 if (!nvme_delete_wq) 3926 goto destroy_reset_wq; 3927 3928 result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme"); 3929 if (result < 0) 3930 goto destroy_delete_wq; 3931 3932 nvme_class = class_create(THIS_MODULE, "nvme"); 3933 if (IS_ERR(nvme_class)) { 3934 result = PTR_ERR(nvme_class); 3935 goto unregister_chrdev; 3936 } 3937 3938 nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem"); 3939 if (IS_ERR(nvme_subsys_class)) { 3940 result = PTR_ERR(nvme_subsys_class); 3941 goto destroy_class; 3942 } 3943 return 0; 3944 3945 destroy_class: 3946 class_destroy(nvme_class); 3947 unregister_chrdev: 3948 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 3949 destroy_delete_wq: 3950 destroy_workqueue(nvme_delete_wq); 3951 destroy_reset_wq: 3952 destroy_workqueue(nvme_reset_wq); 3953 destroy_wq: 3954 destroy_workqueue(nvme_wq); 3955 out: 3956 return result; 3957 } 3958 3959 static void __exit nvme_core_exit(void) 3960 { 3961 ida_destroy(&nvme_subsystems_ida); 3962 class_destroy(nvme_subsys_class); 3963 class_destroy(nvme_class); 3964 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 3965 destroy_workqueue(nvme_delete_wq); 3966 destroy_workqueue(nvme_reset_wq); 3967 destroy_workqueue(nvme_wq); 3968 } 3969 3970 MODULE_LICENSE("GPL"); 3971 MODULE_VERSION("1.0"); 3972 module_init(nvme_core_init); 3973 module_exit(nvme_core_exit); 3974