1 /* 2 * NVM Express device driver 3 * Copyright (c) 2011-2014, Intel Corporation. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 */ 14 15 #include <linux/blkdev.h> 16 #include <linux/blk-mq.h> 17 #include <linux/delay.h> 18 #include <linux/errno.h> 19 #include <linux/hdreg.h> 20 #include <linux/kernel.h> 21 #include <linux/module.h> 22 #include <linux/list_sort.h> 23 #include <linux/slab.h> 24 #include <linux/types.h> 25 #include <linux/pr.h> 26 #include <linux/ptrace.h> 27 #include <linux/nvme_ioctl.h> 28 #include <linux/t10-pi.h> 29 #include <linux/pm_qos.h> 30 #include <asm/unaligned.h> 31 32 #include "nvme.h" 33 #include "fabrics.h" 34 35 #define NVME_MINORS (1U << MINORBITS) 36 37 unsigned int admin_timeout = 60; 38 module_param(admin_timeout, uint, 0644); 39 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands"); 40 EXPORT_SYMBOL_GPL(admin_timeout); 41 42 unsigned int nvme_io_timeout = 30; 43 module_param_named(io_timeout, nvme_io_timeout, uint, 0644); 44 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); 45 EXPORT_SYMBOL_GPL(nvme_io_timeout); 46 47 static unsigned char shutdown_timeout = 5; 48 module_param(shutdown_timeout, byte, 0644); 49 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); 50 51 static u8 nvme_max_retries = 5; 52 module_param_named(max_retries, nvme_max_retries, byte, 0644); 53 MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); 54 55 static unsigned long default_ps_max_latency_us = 100000; 56 module_param(default_ps_max_latency_us, ulong, 0644); 57 MODULE_PARM_DESC(default_ps_max_latency_us, 58 "max power saving latency for new devices; use PM QOS to change per device"); 59 60 static bool force_apst; 61 module_param(force_apst, bool, 0644); 62 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); 63 64 static bool streams; 65 module_param(streams, bool, 0644); 66 MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); 67 68 struct workqueue_struct *nvme_wq; 69 EXPORT_SYMBOL_GPL(nvme_wq); 70 71 static DEFINE_IDA(nvme_subsystems_ida); 72 static LIST_HEAD(nvme_subsystems); 73 static DEFINE_MUTEX(nvme_subsystems_lock); 74 75 static DEFINE_IDA(nvme_instance_ida); 76 static dev_t nvme_chr_devt; 77 static struct class *nvme_class; 78 static struct class *nvme_subsys_class; 79 80 static void nvme_ns_remove(struct nvme_ns *ns); 81 static int nvme_revalidate_disk(struct gendisk *disk); 82 83 static __le32 nvme_get_log_dw10(u8 lid, size_t size) 84 { 85 return cpu_to_le32((((size / 4) - 1) << 16) | lid); 86 } 87 88 int nvme_reset_ctrl(struct nvme_ctrl *ctrl) 89 { 90 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 91 return -EBUSY; 92 if (!queue_work(nvme_wq, &ctrl->reset_work)) 93 return -EBUSY; 94 return 0; 95 } 96 EXPORT_SYMBOL_GPL(nvme_reset_ctrl); 97 98 static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) 99 { 100 int ret; 101 102 ret = nvme_reset_ctrl(ctrl); 103 if (!ret) 104 flush_work(&ctrl->reset_work); 105 return ret; 106 } 107 108 static void nvme_delete_ctrl_work(struct work_struct *work) 109 { 110 struct 
nvme_ctrl *ctrl = 111 container_of(work, struct nvme_ctrl, delete_work); 112 113 flush_work(&ctrl->reset_work); 114 nvme_stop_ctrl(ctrl); 115 nvme_remove_namespaces(ctrl); 116 ctrl->ops->delete_ctrl(ctrl); 117 nvme_uninit_ctrl(ctrl); 118 nvme_put_ctrl(ctrl); 119 } 120 121 int nvme_delete_ctrl(struct nvme_ctrl *ctrl) 122 { 123 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 124 return -EBUSY; 125 if (!queue_work(nvme_wq, &ctrl->delete_work)) 126 return -EBUSY; 127 return 0; 128 } 129 EXPORT_SYMBOL_GPL(nvme_delete_ctrl); 130 131 int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) 132 { 133 int ret = 0; 134 135 /* 136 * Keep a reference until the work is flushed since ->delete_ctrl 137 * can free the controller. 138 */ 139 nvme_get_ctrl(ctrl); 140 ret = nvme_delete_ctrl(ctrl); 141 if (!ret) 142 flush_work(&ctrl->delete_work); 143 nvme_put_ctrl(ctrl); 144 return ret; 145 } 146 EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync); 147 148 static inline bool nvme_ns_has_pi(struct nvme_ns *ns) 149 { 150 return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple); 151 } 152 153 static blk_status_t nvme_error_status(struct request *req) 154 { 155 switch (nvme_req(req)->status & 0x7ff) { 156 case NVME_SC_SUCCESS: 157 return BLK_STS_OK; 158 case NVME_SC_CAP_EXCEEDED: 159 return BLK_STS_NOSPC; 160 case NVME_SC_ONCS_NOT_SUPPORTED: 161 return BLK_STS_NOTSUPP; 162 case NVME_SC_WRITE_FAULT: 163 case NVME_SC_READ_ERROR: 164 case NVME_SC_UNWRITTEN_BLOCK: 165 case NVME_SC_ACCESS_DENIED: 166 case NVME_SC_READ_ONLY: 167 return BLK_STS_MEDIUM; 168 case NVME_SC_GUARD_CHECK: 169 case NVME_SC_APPTAG_CHECK: 170 case NVME_SC_REFTAG_CHECK: 171 case NVME_SC_INVALID_PI: 172 return BLK_STS_PROTECTION; 173 case NVME_SC_RESERVATION_CONFLICT: 174 return BLK_STS_NEXUS; 175 default: 176 return BLK_STS_IOERR; 177 } 178 } 179 180 static inline bool nvme_req_needs_retry(struct request *req) 181 { 182 if (blk_noretry_request(req)) 183 return false; 184 if (nvme_req(req)->status & NVME_SC_DNR) 185 return false; 186 if (nvme_req(req)->retries >= nvme_max_retries) 187 return false; 188 return true; 189 } 190 191 void nvme_complete_rq(struct request *req) 192 { 193 if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) { 194 if (nvme_req_needs_failover(req)) { 195 nvme_failover_req(req); 196 return; 197 } 198 199 if (!blk_queue_dying(req->q)) { 200 nvme_req(req)->retries++; 201 blk_mq_requeue_request(req, true); 202 return; 203 } 204 } 205 206 blk_mq_end_request(req, nvme_error_status(req)); 207 } 208 EXPORT_SYMBOL_GPL(nvme_complete_rq); 209 210 void nvme_cancel_request(struct request *req, void *data, bool reserved) 211 { 212 if (!blk_mq_request_started(req)) 213 return; 214 215 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, 216 "Cancelling I/O %d", req->tag); 217 218 nvme_req(req)->status = NVME_SC_ABORT_REQ; 219 blk_mq_complete_request(req); 220 221 } 222 EXPORT_SYMBOL_GPL(nvme_cancel_request); 223 224 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 225 enum nvme_ctrl_state new_state) 226 { 227 enum nvme_ctrl_state old_state; 228 unsigned long flags; 229 bool changed = false; 230 231 spin_lock_irqsave(&ctrl->lock, flags); 232 233 old_state = ctrl->state; 234 switch (new_state) { 235 case NVME_CTRL_LIVE: 236 switch (old_state) { 237 case NVME_CTRL_NEW: 238 case NVME_CTRL_RESETTING: 239 case NVME_CTRL_RECONNECTING: 240 changed = true; 241 /* FALLTHRU */ 242 default: 243 break; 244 } 245 break; 246 case NVME_CTRL_RESETTING: 247 switch (old_state) { 248 case NVME_CTRL_NEW: 249 case NVME_CTRL_LIVE: 250 changed = 
true; 251 /* FALLTHRU */ 252 default: 253 break; 254 } 255 break; 256 case NVME_CTRL_RECONNECTING: 257 switch (old_state) { 258 case NVME_CTRL_LIVE: 259 case NVME_CTRL_RESETTING: 260 changed = true; 261 /* FALLTHRU */ 262 default: 263 break; 264 } 265 break; 266 case NVME_CTRL_DELETING: 267 switch (old_state) { 268 case NVME_CTRL_LIVE: 269 case NVME_CTRL_RESETTING: 270 case NVME_CTRL_RECONNECTING: 271 changed = true; 272 /* FALLTHRU */ 273 default: 274 break; 275 } 276 break; 277 case NVME_CTRL_DEAD: 278 switch (old_state) { 279 case NVME_CTRL_DELETING: 280 changed = true; 281 /* FALLTHRU */ 282 default: 283 break; 284 } 285 break; 286 default: 287 break; 288 } 289 290 if (changed) 291 ctrl->state = new_state; 292 293 spin_unlock_irqrestore(&ctrl->lock, flags); 294 if (changed && ctrl->state == NVME_CTRL_LIVE) 295 nvme_kick_requeue_lists(ctrl); 296 return changed; 297 } 298 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); 299 300 static void nvme_free_ns_head(struct kref *ref) 301 { 302 struct nvme_ns_head *head = 303 container_of(ref, struct nvme_ns_head, ref); 304 305 nvme_mpath_remove_disk(head); 306 ida_simple_remove(&head->subsys->ns_ida, head->instance); 307 list_del_init(&head->entry); 308 cleanup_srcu_struct(&head->srcu); 309 kfree(head); 310 } 311 312 static void nvme_put_ns_head(struct nvme_ns_head *head) 313 { 314 kref_put(&head->ref, nvme_free_ns_head); 315 } 316 317 static void nvme_free_ns(struct kref *kref) 318 { 319 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); 320 321 if (ns->ndev) 322 nvme_nvm_unregister(ns); 323 324 put_disk(ns->disk); 325 nvme_put_ns_head(ns->head); 326 nvme_put_ctrl(ns->ctrl); 327 kfree(ns); 328 } 329 330 static void nvme_put_ns(struct nvme_ns *ns) 331 { 332 kref_put(&ns->kref, nvme_free_ns); 333 } 334 335 struct request *nvme_alloc_request(struct request_queue *q, 336 struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid) 337 { 338 unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; 339 struct request *req; 340 341 if (qid == NVME_QID_ANY) { 342 req = blk_mq_alloc_request(q, op, flags); 343 } else { 344 req = blk_mq_alloc_request_hctx(q, op, flags, 345 qid ? qid - 1 : 0); 346 } 347 if (IS_ERR(req)) 348 return req; 349 350 req->cmd_flags |= REQ_FAILFAST_DRIVER; 351 nvme_req(req)->cmd = cmd; 352 353 return req; 354 } 355 EXPORT_SYMBOL_GPL(nvme_alloc_request); 356 357 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) 358 { 359 struct nvme_command c; 360 361 memset(&c, 0, sizeof(c)); 362 363 c.directive.opcode = nvme_admin_directive_send; 364 c.directive.nsid = cpu_to_le32(NVME_NSID_ALL); 365 c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE; 366 c.directive.dtype = NVME_DIR_IDENTIFY; 367 c.directive.tdtype = NVME_DIR_STREAMS; 368 c.directive.endir = enable ? 
NVME_DIR_ENDIR : 0; 369 370 return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0); 371 } 372 373 static int nvme_disable_streams(struct nvme_ctrl *ctrl) 374 { 375 return nvme_toggle_streams(ctrl, false); 376 } 377 378 static int nvme_enable_streams(struct nvme_ctrl *ctrl) 379 { 380 return nvme_toggle_streams(ctrl, true); 381 } 382 383 static int nvme_get_stream_params(struct nvme_ctrl *ctrl, 384 struct streams_directive_params *s, u32 nsid) 385 { 386 struct nvme_command c; 387 388 memset(&c, 0, sizeof(c)); 389 memset(s, 0, sizeof(*s)); 390 391 c.directive.opcode = nvme_admin_directive_recv; 392 c.directive.nsid = cpu_to_le32(nsid); 393 c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); 394 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; 395 c.directive.dtype = NVME_DIR_STREAMS; 396 397 return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s)); 398 } 399 400 static int nvme_configure_directives(struct nvme_ctrl *ctrl) 401 { 402 struct streams_directive_params s; 403 int ret; 404 405 if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES)) 406 return 0; 407 if (!streams) 408 return 0; 409 410 ret = nvme_enable_streams(ctrl); 411 if (ret) 412 return ret; 413 414 ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL); 415 if (ret) 416 return ret; 417 418 ctrl->nssa = le16_to_cpu(s.nssa); 419 if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) { 420 dev_info(ctrl->device, "too few streams (%u) available\n", 421 ctrl->nssa); 422 nvme_disable_streams(ctrl); 423 return 0; 424 } 425 426 ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1); 427 dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams); 428 return 0; 429 } 430 431 /* 432 * Check if 'req' has a write hint associated with it. If it does, assign 433 * a valid namespace stream to the write. 
434 */ 435 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl, 436 struct request *req, u16 *control, 437 u32 *dsmgmt) 438 { 439 enum rw_hint streamid = req->write_hint; 440 441 if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE) 442 streamid = 0; 443 else { 444 streamid--; 445 if (WARN_ON_ONCE(streamid > ctrl->nr_streams)) 446 return; 447 448 *control |= NVME_RW_DTYPE_STREAMS; 449 *dsmgmt |= streamid << 16; 450 } 451 452 if (streamid < ARRAY_SIZE(req->q->write_hints)) 453 req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9; 454 } 455 456 static inline void nvme_setup_flush(struct nvme_ns *ns, 457 struct nvme_command *cmnd) 458 { 459 memset(cmnd, 0, sizeof(*cmnd)); 460 cmnd->common.opcode = nvme_cmd_flush; 461 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); 462 } 463 464 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, 465 struct nvme_command *cmnd) 466 { 467 unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; 468 struct nvme_dsm_range *range; 469 struct bio *bio; 470 471 range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC); 472 if (!range) 473 return BLK_STS_RESOURCE; 474 475 __rq_for_each_bio(bio, req) { 476 u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector); 477 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; 478 479 range[n].cattr = cpu_to_le32(0); 480 range[n].nlb = cpu_to_le32(nlb); 481 range[n].slba = cpu_to_le64(slba); 482 n++; 483 } 484 485 if (WARN_ON_ONCE(n != segments)) { 486 kfree(range); 487 return BLK_STS_IOERR; 488 } 489 490 memset(cmnd, 0, sizeof(*cmnd)); 491 cmnd->dsm.opcode = nvme_cmd_dsm; 492 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); 493 cmnd->dsm.nr = cpu_to_le32(segments - 1); 494 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 495 496 req->special_vec.bv_page = virt_to_page(range); 497 req->special_vec.bv_offset = offset_in_page(range); 498 req->special_vec.bv_len = sizeof(*range) * segments; 499 req->rq_flags |= RQF_SPECIAL_PAYLOAD; 500 501 return BLK_STS_OK; 502 } 503 504 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, 505 struct request *req, struct nvme_command *cmnd) 506 { 507 struct nvme_ctrl *ctrl = ns->ctrl; 508 u16 control = 0; 509 u32 dsmgmt = 0; 510 511 if (req->cmd_flags & REQ_FUA) 512 control |= NVME_RW_FUA; 513 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) 514 control |= NVME_RW_LR; 515 516 if (req->cmd_flags & REQ_RAHEAD) 517 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 518 519 memset(cmnd, 0, sizeof(*cmnd)); 520 cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read); 521 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); 522 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); 523 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); 524 525 if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams) 526 nvme_assign_write_stream(ctrl, req, &control, &dsmgmt); 527 528 if (ns->ms) { 529 /* 530 * If formatted with metadata, the block layer always provides a 531 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else 532 * we enable the PRACT bit for protection information or set the 533 * namespace capacity to zero to prevent any I/O.
534 */ 535 if (!blk_integrity_rq(req)) { 536 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) 537 return BLK_STS_NOTSUPP; 538 control |= NVME_RW_PRINFO_PRACT; 539 } 540 541 switch (ns->pi_type) { 542 case NVME_NS_DPS_PI_TYPE3: 543 control |= NVME_RW_PRINFO_PRCHK_GUARD; 544 break; 545 case NVME_NS_DPS_PI_TYPE1: 546 case NVME_NS_DPS_PI_TYPE2: 547 control |= NVME_RW_PRINFO_PRCHK_GUARD | 548 NVME_RW_PRINFO_PRCHK_REF; 549 cmnd->rw.reftag = cpu_to_le32( 550 nvme_block_nr(ns, blk_rq_pos(req))); 551 break; 552 } 553 } 554 555 cmnd->rw.control = cpu_to_le16(control); 556 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 557 return 0; 558 } 559 560 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, 561 struct nvme_command *cmd) 562 { 563 blk_status_t ret = BLK_STS_OK; 564 565 if (!(req->rq_flags & RQF_DONTPREP)) { 566 nvme_req(req)->retries = 0; 567 nvme_req(req)->flags = 0; 568 req->rq_flags |= RQF_DONTPREP; 569 } 570 571 switch (req_op(req)) { 572 case REQ_OP_DRV_IN: 573 case REQ_OP_DRV_OUT: 574 memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); 575 break; 576 case REQ_OP_FLUSH: 577 nvme_setup_flush(ns, cmd); 578 break; 579 case REQ_OP_WRITE_ZEROES: 580 /* currently only aliased to deallocate for a few ctrls: */ 581 case REQ_OP_DISCARD: 582 ret = nvme_setup_discard(ns, req, cmd); 583 break; 584 case REQ_OP_READ: 585 case REQ_OP_WRITE: 586 ret = nvme_setup_rw(ns, req, cmd); 587 break; 588 default: 589 WARN_ON_ONCE(1); 590 return BLK_STS_IOERR; 591 } 592 593 cmd->common.command_id = req->tag; 594 return ret; 595 } 596 EXPORT_SYMBOL_GPL(nvme_setup_cmd); 597 598 /* 599 * Returns 0 on success. If the result is negative, it's a Linux error code; 600 * if the result is positive, it's an NVM Express status code 601 */ 602 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 603 union nvme_result *result, void *buffer, unsigned bufflen, 604 unsigned timeout, int qid, int at_head, 605 blk_mq_req_flags_t flags) 606 { 607 struct request *req; 608 int ret; 609 610 req = nvme_alloc_request(q, cmd, flags, qid); 611 if (IS_ERR(req)) 612 return PTR_ERR(req); 613 614 req->timeout = timeout ? 
timeout : ADMIN_TIMEOUT; 615 616 if (buffer && bufflen) { 617 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); 618 if (ret) 619 goto out; 620 } 621 622 blk_execute_rq(req->q, NULL, req, at_head); 623 if (result) 624 *result = nvme_req(req)->result; 625 if (nvme_req(req)->flags & NVME_REQ_CANCELLED) 626 ret = -EINTR; 627 else 628 ret = nvme_req(req)->status; 629 out: 630 blk_mq_free_request(req); 631 return ret; 632 } 633 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd); 634 635 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 636 void *buffer, unsigned bufflen) 637 { 638 return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0, 639 NVME_QID_ANY, 0, 0); 640 } 641 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); 642 643 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf, 644 unsigned len, u32 seed, bool write) 645 { 646 struct bio_integrity_payload *bip; 647 int ret = -ENOMEM; 648 void *buf; 649 650 buf = kmalloc(len, GFP_KERNEL); 651 if (!buf) 652 goto out; 653 654 ret = -EFAULT; 655 if (write && copy_from_user(buf, ubuf, len)) 656 goto out_free_meta; 657 658 bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); 659 if (IS_ERR(bip)) { 660 ret = PTR_ERR(bip); 661 goto out_free_meta; 662 } 663 664 bip->bip_iter.bi_size = len; 665 bip->bip_iter.bi_sector = seed; 666 ret = bio_integrity_add_page(bio, virt_to_page(buf), len, 667 offset_in_page(buf)); 668 if (ret == len) 669 return buf; 670 ret = -ENOMEM; 671 out_free_meta: 672 kfree(buf); 673 out: 674 return ERR_PTR(ret); 675 } 676 677 static int nvme_submit_user_cmd(struct request_queue *q, 678 struct nvme_command *cmd, void __user *ubuffer, 679 unsigned bufflen, void __user *meta_buffer, unsigned meta_len, 680 u32 meta_seed, u32 *result, unsigned timeout) 681 { 682 bool write = nvme_is_write(cmd); 683 struct nvme_ns *ns = q->queuedata; 684 struct gendisk *disk = ns ? ns->disk : NULL; 685 struct request *req; 686 struct bio *bio = NULL; 687 void *meta = NULL; 688 int ret; 689 690 req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY); 691 if (IS_ERR(req)) 692 return PTR_ERR(req); 693 694 req->timeout = timeout ? 
timeout : ADMIN_TIMEOUT; 695 696 if (ubuffer && bufflen) { 697 ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, 698 GFP_KERNEL); 699 if (ret) 700 goto out; 701 bio = req->bio; 702 bio->bi_disk = disk; 703 if (disk && meta_buffer && meta_len) { 704 meta = nvme_add_user_metadata(bio, meta_buffer, meta_len, 705 meta_seed, write); 706 if (IS_ERR(meta)) { 707 ret = PTR_ERR(meta); 708 goto out_unmap; 709 } 710 } 711 } 712 713 blk_execute_rq(req->q, disk, req, 0); 714 if (nvme_req(req)->flags & NVME_REQ_CANCELLED) 715 ret = -EINTR; 716 else 717 ret = nvme_req(req)->status; 718 if (result) 719 *result = le32_to_cpu(nvme_req(req)->result.u32); 720 if (meta && !ret && !write) { 721 if (copy_to_user(meta_buffer, meta, meta_len)) 722 ret = -EFAULT; 723 } 724 kfree(meta); 725 out_unmap: 726 if (bio) 727 blk_rq_unmap_user(bio); 728 out: 729 blk_mq_free_request(req); 730 return ret; 731 } 732 733 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) 734 { 735 struct nvme_ctrl *ctrl = rq->end_io_data; 736 737 blk_mq_free_request(rq); 738 739 if (status) { 740 dev_err(ctrl->device, 741 "failed nvme_keep_alive_end_io error=%d\n", 742 status); 743 return; 744 } 745 746 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); 747 } 748 749 static int nvme_keep_alive(struct nvme_ctrl *ctrl) 750 { 751 struct nvme_command c; 752 struct request *rq; 753 754 memset(&c, 0, sizeof(c)); 755 c.common.opcode = nvme_admin_keep_alive; 756 757 rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED, 758 NVME_QID_ANY); 759 if (IS_ERR(rq)) 760 return PTR_ERR(rq); 761 762 rq->timeout = ctrl->kato * HZ; 763 rq->end_io_data = ctrl; 764 765 blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io); 766 767 return 0; 768 } 769 770 static void nvme_keep_alive_work(struct work_struct *work) 771 { 772 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 773 struct nvme_ctrl, ka_work); 774 775 if (nvme_keep_alive(ctrl)) { 776 /* allocation failure, reset the controller */ 777 dev_err(ctrl->device, "keep-alive failed\n"); 778 nvme_reset_ctrl(ctrl); 779 return; 780 } 781 } 782 783 void nvme_start_keep_alive(struct nvme_ctrl *ctrl) 784 { 785 if (unlikely(ctrl->kato == 0)) 786 return; 787 788 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 789 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); 790 } 791 EXPORT_SYMBOL_GPL(nvme_start_keep_alive); 792 793 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) 794 { 795 if (unlikely(ctrl->kato == 0)) 796 return; 797 798 cancel_delayed_work_sync(&ctrl->ka_work); 799 } 800 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); 801 802 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) 803 { 804 struct nvme_command c = { }; 805 int error; 806 807 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 808 c.identify.opcode = nvme_admin_identify; 809 c.identify.cns = NVME_ID_CNS_CTRL; 810 811 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); 812 if (!*id) 813 return -ENOMEM; 814 815 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, 816 sizeof(struct nvme_id_ctrl)); 817 if (error) 818 kfree(*id); 819 return error; 820 } 821 822 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, 823 struct nvme_ns_ids *ids) 824 { 825 struct nvme_command c = { }; 826 int status; 827 void *data; 828 int pos; 829 int len; 830 831 c.identify.opcode = nvme_admin_identify; 832 c.identify.nsid = cpu_to_le32(nsid); 833 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; 834 835 data = 
kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 836 if (!data) 837 return -ENOMEM; 838 839 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, 840 NVME_IDENTIFY_DATA_SIZE); 841 if (status) 842 goto free_data; 843 844 for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { 845 struct nvme_ns_id_desc *cur = data + pos; 846 847 if (cur->nidl == 0) 848 break; 849 850 switch (cur->nidt) { 851 case NVME_NIDT_EUI64: 852 if (cur->nidl != NVME_NIDT_EUI64_LEN) { 853 dev_warn(ctrl->device, 854 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n", 855 cur->nidl); 856 goto free_data; 857 } 858 len = NVME_NIDT_EUI64_LEN; 859 memcpy(ids->eui64, data + pos + sizeof(*cur), len); 860 break; 861 case NVME_NIDT_NGUID: 862 if (cur->nidl != NVME_NIDT_NGUID_LEN) { 863 dev_warn(ctrl->device, 864 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n", 865 cur->nidl); 866 goto free_data; 867 } 868 len = NVME_NIDT_NGUID_LEN; 869 memcpy(ids->nguid, data + pos + sizeof(*cur), len); 870 break; 871 case NVME_NIDT_UUID: 872 if (cur->nidl != NVME_NIDT_UUID_LEN) { 873 dev_warn(ctrl->device, 874 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n", 875 cur->nidl); 876 goto free_data; 877 } 878 len = NVME_NIDT_UUID_LEN; 879 uuid_copy(&ids->uuid, data + pos + sizeof(*cur)); 880 break; 881 default: 882 /* Skip unknown types */ 883 len = cur->nidl; 884 break; 885 } 886 887 len += sizeof(*cur); 888 } 889 free_data: 890 kfree(data); 891 return status; 892 } 893 894 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list) 895 { 896 struct nvme_command c = { }; 897 898 c.identify.opcode = nvme_admin_identify; 899 c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST; 900 c.identify.nsid = cpu_to_le32(nsid); 901 return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000); 902 } 903 904 static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl, 905 unsigned nsid) 906 { 907 struct nvme_id_ns *id; 908 struct nvme_command c = { }; 909 int error; 910 911 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 912 c.identify.opcode = nvme_admin_identify; 913 c.identify.nsid = cpu_to_le32(nsid); 914 c.identify.cns = NVME_ID_CNS_NS; 915 916 id = kmalloc(sizeof(*id), GFP_KERNEL); 917 if (!id) 918 return NULL; 919 920 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 921 if (error) { 922 dev_warn(ctrl->device, "Identify namespace failed\n"); 923 kfree(id); 924 return NULL; 925 } 926 927 return id; 928 } 929 930 static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, 931 void *buffer, size_t buflen, u32 *result) 932 { 933 struct nvme_command c; 934 union nvme_result res; 935 int ret; 936 937 memset(&c, 0, sizeof(c)); 938 c.features.opcode = nvme_admin_set_features; 939 c.features.fid = cpu_to_le32(fid); 940 c.features.dword11 = cpu_to_le32(dword11); 941 942 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, 943 buffer, buflen, 0, NVME_QID_ANY, 0, 0); 944 if (ret >= 0 && result) 945 *result = le32_to_cpu(res.u32); 946 return ret; 947 } 948
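/* nvme_set_queue_count() below requests *count I/O queues via the Number of Queues feature; on success *count is clamped to the number of queues the controller actually granted (the smaller of the submission and completion queue counts in the feature result). */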
949 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) 950 { 951 u32 q_count = (*count - 1) | ((*count - 1) << 16); 952 u32 result; 953 int status, nr_io_queues; 954 955 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, 956 &result); 957 if (status < 0) 958 return status; 959 960 /* 961 * Degraded controllers might return an error when setting the queue 962 * count. We still want to be able to bring them online and offer 963 * access to the admin queue, as that might be the only way to fix them up. 964 */ 965 if (status > 0) { 966 dev_err(ctrl->device, "Could not set queue count (%d)\n", status); 967 *count = 0; 968 } else { 969 nr_io_queues = min(result & 0xffff, result >> 16) + 1; 970 *count = min(*count, nr_io_queues); 971 } 972 973 return 0; 974 } 975 EXPORT_SYMBOL_GPL(nvme_set_queue_count); 976 977 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) 978 { 979 struct nvme_user_io io; 980 struct nvme_command c; 981 unsigned length, meta_len; 982 void __user *metadata; 983 984 if (copy_from_user(&io, uio, sizeof(io))) 985 return -EFAULT; 986 if (io.flags) 987 return -EINVAL; 988 989 switch (io.opcode) { 990 case nvme_cmd_write: 991 case nvme_cmd_read: 992 case nvme_cmd_compare: 993 break; 994 default: 995 return -EINVAL; 996 } 997 998 length = (io.nblocks + 1) << ns->lba_shift; 999 meta_len = (io.nblocks + 1) * ns->ms; 1000 metadata = (void __user *)(uintptr_t)io.metadata; 1001 1002 if (ns->ext) { 1003 length += meta_len; 1004 meta_len = 0; 1005 } else if (meta_len) { 1006 if ((io.metadata & 3) || !io.metadata) 1007 return -EINVAL; 1008 } 1009 1010 memset(&c, 0, sizeof(c)); 1011 c.rw.opcode = io.opcode; 1012 c.rw.flags = io.flags; 1013 c.rw.nsid = cpu_to_le32(ns->head->ns_id); 1014 c.rw.slba = cpu_to_le64(io.slba); 1015 c.rw.length = cpu_to_le16(io.nblocks); 1016 c.rw.control = cpu_to_le16(io.control); 1017 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); 1018 c.rw.reftag = cpu_to_le32(io.reftag); 1019 c.rw.apptag = cpu_to_le16(io.apptag); 1020 c.rw.appmask = cpu_to_le16(io.appmask); 1021 1022 return nvme_submit_user_cmd(ns->queue, &c, 1023 (void __user *)(uintptr_t)io.addr, length, 1024 metadata, meta_len, io.slba, NULL, 0); 1025 } 1026 1027 static u32 nvme_known_admin_effects(u8 opcode) 1028 { 1029 switch (opcode) { 1030 case nvme_admin_format_nvm: 1031 return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC | 1032 NVME_CMD_EFFECTS_CSE_MASK; 1033 case nvme_admin_sanitize_nvm: 1034 return NVME_CMD_EFFECTS_CSE_MASK; 1035 default: 1036 break; 1037 } 1038 return 0; 1039 } 1040 1041 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 1042 u8 opcode) 1043 { 1044 u32 effects = 0; 1045 1046 if (ns) { 1047 if (ctrl->effects) 1048 effects = le32_to_cpu(ctrl->effects->iocs[opcode]); 1049 if (effects & ~NVME_CMD_EFFECTS_CSUPP) 1050 dev_warn(ctrl->device, 1051 "IO command:%02x has unhandled effects:%08x\n", 1052 opcode, effects); 1053 return 0; 1054 } 1055 1056 if (ctrl->effects) 1057 effects = le32_to_cpu(ctrl->effects->iocs[opcode]); 1058 else 1059 effects = nvme_known_admin_effects(opcode); 1060 1061 /* 1062 * For simplicity, IO to all namespaces is quiesced even if the command 1063 * effects say only one namespace is affected. 1064 */ 1065 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1066 nvme_start_freeze(ctrl); 1067 nvme_wait_freeze(ctrl); 1068 } 1069 return effects; 1070 } 1071 1072 static void nvme_update_formats(struct nvme_ctrl *ctrl) 1073 { 1074 struct nvme_ns *ns; 1075 1076 mutex_lock(&ctrl->namespaces_mutex); 1077 list_for_each_entry(ns, &ctrl->namespaces, list) { 1078 if (ns->disk && nvme_revalidate_disk(ns->disk)) 1079 nvme_ns_remove(ns); 1080 } 1081 mutex_unlock(&ctrl->namespaces_mutex); 1082 } 1083
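/* nvme_passthru_end() undoes the freeze taken in nvme_passthru_start(): namespaces are revalidated and I/O is unfrozen if the command may have changed LBA formats, the cached identify data is re-read if controller capabilities may have changed, and a namespace rescan is queued if namespace inventory or capabilities may have changed. */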
1084 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) 1085 { 1086 /* 1087 * Revalidate LBA changes prior to unfreezing. This is necessary to 1088 * prevent memory corruption if a logical block size was changed by 1089 * this command. 1090 */ 1091 if (effects & NVME_CMD_EFFECTS_LBCC) 1092 nvme_update_formats(ctrl); 1093 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) 1094 nvme_unfreeze(ctrl); 1095 if (effects & NVME_CMD_EFFECTS_CCC) 1096 nvme_init_identify(ctrl); 1097 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) 1098 nvme_queue_scan(ctrl); 1099 } 1100 1101 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 1102 struct nvme_passthru_cmd __user *ucmd) 1103 { 1104 struct nvme_passthru_cmd cmd; 1105 struct nvme_command c; 1106 unsigned timeout = 0; 1107 u32 effects; 1108 int status; 1109 1110 if (!capable(CAP_SYS_ADMIN)) 1111 return -EACCES; 1112 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) 1113 return -EFAULT; 1114 if (cmd.flags) 1115 return -EINVAL; 1116 1117 memset(&c, 0, sizeof(c)); 1118 c.common.opcode = cmd.opcode; 1119 c.common.flags = cmd.flags; 1120 c.common.nsid = cpu_to_le32(cmd.nsid); 1121 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); 1122 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); 1123 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10); 1124 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11); 1125 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12); 1126 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13); 1127 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14); 1128 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15); 1129 1130 if (cmd.timeout_ms) 1131 timeout = msecs_to_jiffies(cmd.timeout_ms); 1132 1133 effects = nvme_passthru_start(ctrl, ns, cmd.opcode); 1134 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, 1135 (void __user *)(uintptr_t)cmd.addr, cmd.data_len, 1136 (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len, 1137 0, &cmd.result, timeout); 1138 nvme_passthru_end(ctrl, effects); 1139 1140 if (status >= 0) { 1141 if (put_user(cmd.result, &ucmd->result)) 1142 return -EFAULT; 1143 } 1144 1145 return status; 1146 } 1147 1148 /* 1149 * Issue ioctl requests on the first available path. Note that unlike normal 1150 * block layer requests we will not retry a failed request on another controller.
1151 */ 1152 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, 1153 struct nvme_ns_head **head, int *srcu_idx) 1154 { 1155 #ifdef CONFIG_NVME_MULTIPATH 1156 if (disk->fops == &nvme_ns_head_ops) { 1157 *head = disk->private_data; 1158 *srcu_idx = srcu_read_lock(&(*head)->srcu); 1159 return nvme_find_path(*head); 1160 } 1161 #endif 1162 *head = NULL; 1163 *srcu_idx = -1; 1164 return disk->private_data; 1165 } 1166 1167 static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) 1168 { 1169 if (head) 1170 srcu_read_unlock(&head->srcu, idx); 1171 } 1172 1173 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg) 1174 { 1175 switch (cmd) { 1176 case NVME_IOCTL_ID: 1177 force_successful_syscall_return(); 1178 return ns->head->ns_id; 1179 case NVME_IOCTL_ADMIN_CMD: 1180 return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg); 1181 case NVME_IOCTL_IO_CMD: 1182 return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg); 1183 case NVME_IOCTL_SUBMIT_IO: 1184 return nvme_submit_io(ns, (void __user *)arg); 1185 default: 1186 #ifdef CONFIG_NVM 1187 if (ns->ndev) 1188 return nvme_nvm_ioctl(ns, cmd, arg); 1189 #endif 1190 if (is_sed_ioctl(cmd)) 1191 return sed_ioctl(ns->ctrl->opal_dev, cmd, 1192 (void __user *) arg); 1193 return -ENOTTY; 1194 } 1195 } 1196 1197 static int nvme_ioctl(struct block_device *bdev, fmode_t mode, 1198 unsigned int cmd, unsigned long arg) 1199 { 1200 struct nvme_ns_head *head = NULL; 1201 struct nvme_ns *ns; 1202 int srcu_idx, ret; 1203 1204 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); 1205 if (unlikely(!ns)) 1206 ret = -EWOULDBLOCK; 1207 else 1208 ret = nvme_ns_ioctl(ns, cmd, arg); 1209 nvme_put_ns_from_disk(head, srcu_idx); 1210 return ret; 1211 } 1212 1213 static int nvme_open(struct block_device *bdev, fmode_t mode) 1214 { 1215 struct nvme_ns *ns = bdev->bd_disk->private_data; 1216 1217 #ifdef CONFIG_NVME_MULTIPATH 1218 /* should never be called due to GENHD_FL_HIDDEN */ 1219 if (WARN_ON_ONCE(ns->head->disk)) 1220 return -ENXIO; 1221 #endif 1222 if (!kref_get_unless_zero(&ns->kref)) 1223 return -ENXIO; 1224 return 0; 1225 } 1226 1227 static void nvme_release(struct gendisk *disk, fmode_t mode) 1228 { 1229 nvme_put_ns(disk->private_data); 1230 } 1231 1232 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) 1233 { 1234 /* some standard values */ 1235 geo->heads = 1 << 6; 1236 geo->sectors = 1 << 5; 1237 geo->cylinders = get_capacity(bdev->bd_disk) >> 11; 1238 return 0; 1239 } 1240 1241 #ifdef CONFIG_BLK_DEV_INTEGRITY 1242 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) 1243 { 1244 struct blk_integrity integrity; 1245 1246 memset(&integrity, 0, sizeof(integrity)); 1247 switch (pi_type) { 1248 case NVME_NS_DPS_PI_TYPE3: 1249 integrity.profile = &t10_pi_type3_crc; 1250 integrity.tag_size = sizeof(u16) + sizeof(u32); 1251 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1252 break; 1253 case NVME_NS_DPS_PI_TYPE1: 1254 case NVME_NS_DPS_PI_TYPE2: 1255 integrity.profile = &t10_pi_type1_crc; 1256 integrity.tag_size = sizeof(u16); 1257 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1258 break; 1259 default: 1260 integrity.profile = NULL; 1261 break; 1262 } 1263 integrity.tuple_size = ms; 1264 blk_integrity_register(disk, &integrity); 1265 blk_queue_max_integrity_segments(disk->queue, 1); 1266 } 1267 #else 1268 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) 1269 { 1270 } 1271 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 1272 1273 static void 
nvme_set_chunk_size(struct nvme_ns *ns) 1274 { 1275 u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9)); 1276 blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size)); 1277 } 1278 1279 static void nvme_config_discard(struct nvme_ctrl *ctrl, 1280 unsigned stream_alignment, struct request_queue *queue) 1281 { 1282 u32 size = queue_logical_block_size(queue); 1283 1284 if (stream_alignment) 1285 size *= stream_alignment; 1286 1287 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < 1288 NVME_DSM_MAX_RANGES); 1289 1290 queue->limits.discard_alignment = size; 1291 queue->limits.discard_granularity = size; 1292 1293 blk_queue_max_discard_sectors(queue, UINT_MAX); 1294 blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES); 1295 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue); 1296 1297 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 1298 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); 1299 } 1300 1301 static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, 1302 struct nvme_id_ns *id, struct nvme_ns_ids *ids) 1303 { 1304 memset(ids, 0, sizeof(*ids)); 1305 1306 if (ctrl->vs >= NVME_VS(1, 1, 0)) 1307 memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); 1308 if (ctrl->vs >= NVME_VS(1, 2, 0)) 1309 memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); 1310 if (ctrl->vs >= NVME_VS(1, 3, 0)) { 1311 /* Don't treat error as fatal we potentially 1312 * already have a NGUID or EUI-64 1313 */ 1314 if (nvme_identify_ns_descs(ctrl, nsid, ids)) 1315 dev_warn(ctrl->device, 1316 "%s: Identify Descriptors failed\n", __func__); 1317 } 1318 } 1319 1320 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) 1321 { 1322 return !uuid_is_null(&ids->uuid) || 1323 memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) || 1324 memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 1325 } 1326 1327 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) 1328 { 1329 return uuid_equal(&a->uuid, &b->uuid) && 1330 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && 1331 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0; 1332 } 1333 1334 static void nvme_update_disk_info(struct gendisk *disk, 1335 struct nvme_ns *ns, struct nvme_id_ns *id) 1336 { 1337 sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9); 1338 unsigned stream_alignment = 0; 1339 1340 if (ns->ctrl->nr_streams && ns->sws && ns->sgs) 1341 stream_alignment = ns->sws * ns->sgs; 1342 1343 blk_mq_freeze_queue(disk->queue); 1344 blk_integrity_unregister(disk); 1345 1346 blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift); 1347 if (ns->ms && !ns->ext && 1348 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1349 nvme_init_integrity(disk, ns->ms, ns->pi_type); 1350 if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) 1351 capacity = 0; 1352 set_capacity(disk, capacity); 1353 1354 if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM) 1355 nvme_config_discard(ns->ctrl, stream_alignment, disk->queue); 1356 blk_mq_unfreeze_queue(disk->queue); 1357 } 1358 1359 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) 1360 { 1361 struct nvme_ns *ns = disk->private_data; 1362 1363 /* 1364 * If identify namespace failed, use default 512 byte block size so 1365 * block layer can use before failing read/write for 0 capacity. 
1366 */ 1367 ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds; 1368 if (ns->lba_shift == 0) 1369 ns->lba_shift = 9; 1370 ns->noiob = le16_to_cpu(id->noiob); 1371 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); 1372 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); 1373 /* the PI implementation requires metadata equal t10 pi tuple size */ 1374 if (ns->ms == sizeof(struct t10_pi_tuple)) 1375 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1376 else 1377 ns->pi_type = 0; 1378 1379 if (ns->noiob) 1380 nvme_set_chunk_size(ns); 1381 nvme_update_disk_info(disk, ns, id); 1382 #ifdef CONFIG_NVME_MULTIPATH 1383 if (ns->head->disk) 1384 nvme_update_disk_info(ns->head->disk, ns, id); 1385 #endif 1386 } 1387 1388 static int nvme_revalidate_disk(struct gendisk *disk) 1389 { 1390 struct nvme_ns *ns = disk->private_data; 1391 struct nvme_ctrl *ctrl = ns->ctrl; 1392 struct nvme_id_ns *id; 1393 struct nvme_ns_ids ids; 1394 int ret = 0; 1395 1396 if (test_bit(NVME_NS_DEAD, &ns->flags)) { 1397 set_capacity(disk, 0); 1398 return -ENODEV; 1399 } 1400 1401 id = nvme_identify_ns(ctrl, ns->head->ns_id); 1402 if (!id) 1403 return -ENODEV; 1404 1405 if (id->ncap == 0) { 1406 ret = -ENODEV; 1407 goto out; 1408 } 1409 1410 __nvme_revalidate_disk(disk, id); 1411 nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids); 1412 if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) { 1413 dev_err(ctrl->device, 1414 "identifiers changed for nsid %d\n", ns->head->ns_id); 1415 ret = -ENODEV; 1416 } 1417 1418 out: 1419 kfree(id); 1420 return ret; 1421 } 1422 1423 static char nvme_pr_type(enum pr_type type) 1424 { 1425 switch (type) { 1426 case PR_WRITE_EXCLUSIVE: 1427 return 1; 1428 case PR_EXCLUSIVE_ACCESS: 1429 return 2; 1430 case PR_WRITE_EXCLUSIVE_REG_ONLY: 1431 return 3; 1432 case PR_EXCLUSIVE_ACCESS_REG_ONLY: 1433 return 4; 1434 case PR_WRITE_EXCLUSIVE_ALL_REGS: 1435 return 5; 1436 case PR_EXCLUSIVE_ACCESS_ALL_REGS: 1437 return 6; 1438 default: 1439 return 0; 1440 } 1441 } 1442 1443 static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 1444 u64 key, u64 sa_key, u8 op) 1445 { 1446 struct nvme_ns_head *head = NULL; 1447 struct nvme_ns *ns; 1448 struct nvme_command c; 1449 int srcu_idx, ret; 1450 u8 data[16] = { 0, }; 1451 1452 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); 1453 if (unlikely(!ns)) { 1454 nvme_put_ns_from_disk(head, srcu_idx); 1455 return -EWOULDBLOCK; 1456 } 1457 put_unaligned_le64(key, &data[0]); 1458 put_unaligned_le64(sa_key, &data[8]); 1459 memset(&c, 0, sizeof(c)); 1460 c.common.opcode = op; 1461 c.common.nsid = cpu_to_le32(ns->head->ns_id); 1462 c.common.cdw10[0] = cpu_to_le32(cdw10); 1463 1464 ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16); 1465 nvme_put_ns_from_disk(head, srcu_idx); 1466 return ret; 1467 } 1468 1469 static int nvme_pr_register(struct block_device *bdev, u64 old, 1470 u64 new, unsigned flags) 1471 { 1472 u32 cdw10; 1473 1474 if (flags & ~PR_FL_IGNORE_KEY) 1475 return -EOPNOTSUPP; 1476 1477 cdw10 = old ? 2 : 0; 1478 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; 1479 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 1480 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 1481 } 1482
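/* For the reservation acquire and release commands built below, cdw10 carries the action code in bits 2:0, the ignore-existing-key flag in bit 3, and the reservation type in bits 15:8. */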
1483 static int nvme_pr_reserve(struct block_device *bdev, u64 key, 1484 enum pr_type type, unsigned flags) 1485 { 1486 u32 cdw10; 1487 1488 if (flags & ~PR_FL_IGNORE_KEY) 1489 return -EOPNOTSUPP; 1490 1491 cdw10 = nvme_pr_type(type) << 8; 1492 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); 1493 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); 1494 } 1495 1496 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, 1497 enum pr_type type, bool abort) 1498 { 1499 u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); 1500 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); 1501 } 1502 1503 static int nvme_pr_clear(struct block_device *bdev, u64 key) 1504 { 1505 u32 cdw10 = 1 | (key ? 1 << 3 : 0); 1506 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); 1507 } 1508 1509 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 1510 { 1511 u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); 1512 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 1513 } 1514 1515 static const struct pr_ops nvme_pr_ops = { 1516 .pr_register = nvme_pr_register, 1517 .pr_reserve = nvme_pr_reserve, 1518 .pr_release = nvme_pr_release, 1519 .pr_preempt = nvme_pr_preempt, 1520 .pr_clear = nvme_pr_clear, 1521 }; 1522 1523 #ifdef CONFIG_BLK_SED_OPAL 1524 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 1525 bool send) 1526 { 1527 struct nvme_ctrl *ctrl = data; 1528 struct nvme_command cmd; 1529 1530 memset(&cmd, 0, sizeof(cmd)); 1531 if (send) 1532 cmd.common.opcode = nvme_admin_security_send; 1533 else 1534 cmd.common.opcode = nvme_admin_security_recv; 1535 cmd.common.nsid = 0; 1536 cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 1537 cmd.common.cdw10[1] = cpu_to_le32(len); 1538 1539 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 1540 ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); 1541 } 1542 EXPORT_SYMBOL_GPL(nvme_sec_submit); 1543 #endif /* CONFIG_BLK_SED_OPAL */ 1544 1545 static const struct block_device_operations nvme_fops = { 1546 .owner = THIS_MODULE, 1547 .ioctl = nvme_ioctl, 1548 .compat_ioctl = nvme_ioctl, 1549 .open = nvme_open, 1550 .release = nvme_release, 1551 .getgeo = nvme_getgeo, 1552 .revalidate_disk= nvme_revalidate_disk, 1553 .pr_ops = &nvme_pr_ops, 1554 }; 1555 1556 #ifdef CONFIG_NVME_MULTIPATH 1557 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode) 1558 { 1559 struct nvme_ns_head *head = bdev->bd_disk->private_data; 1560 1561 if (!kref_get_unless_zero(&head->ref)) 1562 return -ENXIO; 1563 return 0; 1564 } 1565 1566 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) 1567 { 1568 nvme_put_ns_head(disk->private_data); 1569 } 1570 1571 const struct block_device_operations nvme_ns_head_ops = { 1572 .owner = THIS_MODULE, 1573 .open = nvme_ns_head_open, 1574 .release = nvme_ns_head_release, 1575 .ioctl = nvme_ioctl, 1576 .compat_ioctl = nvme_ioctl, 1577 .getgeo = nvme_getgeo, 1578 .pr_ops = &nvme_pr_ops, 1579 }; 1580 #endif /* CONFIG_NVME_MULTIPATH */ 1581
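/* nvme_wait_ready() polls CSTS until the controller's ready bit matches the state being waited for; the timeout is derived from CAP.TO, which is reported in units of 500 milliseconds. */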
1601 "initialisation" : "reset"); 1602 return -ENODEV; 1603 } 1604 } 1605 1606 return ret; 1607 } 1608 1609 /* 1610 * If the device has been passed off to us in an enabled state, just clear 1611 * the enabled bit. The spec says we should set the 'shutdown notification 1612 * bits', but doing so may cause the device to complete commands to the 1613 * admin queue ... and we don't know what memory that might be pointing at! 1614 */ 1615 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap) 1616 { 1617 int ret; 1618 1619 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 1620 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 1621 1622 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 1623 if (ret) 1624 return ret; 1625 1626 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 1627 msleep(NVME_QUIRK_DELAY_AMOUNT); 1628 1629 return nvme_wait_ready(ctrl, cap, false); 1630 } 1631 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 1632 1633 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap) 1634 { 1635 /* 1636 * Default to a 4K page size, with the intention to update this 1637 * path in the future to accomodate architectures with differing 1638 * kernel and IO page sizes. 1639 */ 1640 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12; 1641 int ret; 1642 1643 if (page_shift < dev_page_min) { 1644 dev_err(ctrl->device, 1645 "Minimum device page size %u too large for host (%u)\n", 1646 1 << dev_page_min, 1 << page_shift); 1647 return -ENODEV; 1648 } 1649 1650 ctrl->page_size = 1 << page_shift; 1651 1652 ctrl->ctrl_config = NVME_CC_CSS_NVM; 1653 ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; 1654 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 1655 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 1656 ctrl->ctrl_config |= NVME_CC_ENABLE; 1657 1658 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 1659 if (ret) 1660 return ret; 1661 return nvme_wait_ready(ctrl, cap, true); 1662 } 1663 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 1664 1665 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) 1666 { 1667 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); 1668 u32 csts; 1669 int ret; 1670 1671 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 1672 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 1673 1674 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 1675 if (ret) 1676 return ret; 1677 1678 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 1679 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) 1680 break; 1681 1682 msleep(100); 1683 if (fatal_signal_pending(current)) 1684 return -EINTR; 1685 if (time_after(jiffies, timeout)) { 1686 dev_err(ctrl->device, 1687 "Device shutdown incomplete; abort shutdown\n"); 1688 return -ENODEV; 1689 } 1690 } 1691 1692 return ret; 1693 } 1694 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); 1695 1696 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, 1697 struct request_queue *q) 1698 { 1699 bool vwc = false; 1700 1701 if (ctrl->max_hw_sectors) { 1702 u32 max_segments = 1703 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; 1704 1705 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 1706 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 1707 } 1708 if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) 1709 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); 1710 blk_queue_virt_boundary(q, ctrl->page_size - 1); 1711 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) 1712 vwc = true; 1713 blk_queue_write_cache(q, vwc, vwc); 1714 } 1715 1716 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 1717 { 
1718 __le64 ts; 1719 int ret; 1720 1721 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 1722 return 0; 1723 1724 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 1725 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 1726 NULL); 1727 if (ret) 1728 dev_warn_once(ctrl->device, 1729 "could not set timestamp (%d)\n", ret); 1730 return ret; 1731 } 1732 1733 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 1734 { 1735 /* 1736 * APST (Autonomous Power State Transition) lets us program a 1737 * table of power state transitions that the controller will 1738 * perform automatically. We configure it with a simple 1739 * heuristic: we are willing to spend at most 2% of the time 1740 * transitioning between power states. Therefore, when running 1741 * in any given state, we will enter the next lower-power 1742 * non-operational state after waiting 50 * (enlat + exlat) 1743 * microseconds, as long as that state's exit latency is under 1744 * the requested maximum latency. 1745 * 1746 * We will not autonomously enter any non-operational state for 1747 * which the total latency exceeds ps_max_latency_us. Users 1748 * can set ps_max_latency_us to zero to turn off APST. 1749 */ 1750 1751 unsigned apste; 1752 struct nvme_feat_auto_pst *table; 1753 u64 max_lat_us = 0; 1754 int max_ps = -1; 1755 int ret; 1756 1757 /* 1758 * If APST isn't supported or if we haven't been initialized yet, 1759 * then don't do anything. 1760 */ 1761 if (!ctrl->apsta) 1762 return 0; 1763 1764 if (ctrl->npss > 31) { 1765 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 1766 return 0; 1767 } 1768 1769 table = kzalloc(sizeof(*table), GFP_KERNEL); 1770 if (!table) 1771 return 0; 1772 1773 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 1774 /* Turn off APST. */ 1775 apste = 0; 1776 dev_dbg(ctrl->device, "APST disabled\n"); 1777 } else { 1778 __le64 target = cpu_to_le64(0); 1779 int state; 1780 1781 /* 1782 * Walk through all states from lowest- to highest-power. 1783 * According to the spec, lower-numbered states use more 1784 * power. NPSS, despite the name, is the index of the 1785 * lowest-power state, not the number of states. 1786 */ 1787 for (state = (int)ctrl->npss; state >= 0; state--) { 1788 u64 total_latency_us, exit_latency_us, transition_ms; 1789 1790 if (target) 1791 table->entries[state] = target; 1792 1793 /* 1794 * Don't allow transitions to the deepest state 1795 * if it's quirked off. 1796 */ 1797 if (state == ctrl->npss && 1798 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 1799 continue; 1800 1801 /* 1802 * Is this state a useful non-operational state for 1803 * higher-power states to autonomously transition to? 1804 */ 1805 if (!(ctrl->psd[state].flags & 1806 NVME_PS_FLAGS_NON_OP_STATE)) 1807 continue; 1808 1809 exit_latency_us = 1810 (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 1811 if (exit_latency_us > ctrl->ps_max_latency_us) 1812 continue; 1813 1814 total_latency_us = 1815 exit_latency_us + 1816 le32_to_cpu(ctrl->psd[state].entry_lat); 1817 1818 /* 1819 * This state is good. Use it as the APST idle 1820 * target for higher power states. 
1821 */ 1822 transition_ms = total_latency_us + 19; 1823 do_div(transition_ms, 20); 1824 if (transition_ms > (1 << 24) - 1) 1825 transition_ms = (1 << 24) - 1; 1826 1827 target = cpu_to_le64((state << 3) | 1828 (transition_ms << 8)); 1829 1830 if (max_ps == -1) 1831 max_ps = state; 1832 1833 if (total_latency_us > max_lat_us) 1834 max_lat_us = total_latency_us; 1835 } 1836 1837 apste = 1; 1838 1839 if (max_ps == -1) { 1840 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 1841 } else { 1842 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 1843 max_ps, max_lat_us, (int)sizeof(*table), table); 1844 } 1845 } 1846 1847 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 1848 table, sizeof(*table), NULL); 1849 if (ret) 1850 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 1851 1852 kfree(table); 1853 return ret; 1854 } 1855 1856 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 1857 { 1858 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 1859 u64 latency; 1860 1861 switch (val) { 1862 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 1863 case PM_QOS_LATENCY_ANY: 1864 latency = U64_MAX; 1865 break; 1866 1867 default: 1868 latency = val; 1869 } 1870 1871 if (ctrl->ps_max_latency_us != latency) { 1872 ctrl->ps_max_latency_us = latency; 1873 nvme_configure_apst(ctrl); 1874 } 1875 } 1876 1877 struct nvme_core_quirk_entry { 1878 /* 1879 * NVMe model and firmware strings are padded with spaces. For 1880 * simplicity, strings in the quirk table are padded with NULLs 1881 * instead. 1882 */ 1883 u16 vid; 1884 const char *mn; 1885 const char *fr; 1886 unsigned long quirks; 1887 }; 1888 1889 static const struct nvme_core_quirk_entry core_quirks[] = { 1890 { 1891 /* 1892 * This Toshiba device seems to die using any APST states. See: 1893 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 1894 */ 1895 .vid = 0x1179, 1896 .mn = "THNSF5256GPUK TOSHIBA", 1897 .quirks = NVME_QUIRK_NO_APST, 1898 } 1899 }; 1900 1901 /* match is null-terminated but idstr is space-padded. 
*/ 1902 static bool string_matches(const char *idstr, const char *match, size_t len) 1903 { 1904 size_t matchlen; 1905 1906 if (!match) 1907 return true; 1908 1909 matchlen = strlen(match); 1910 WARN_ON_ONCE(matchlen > len); 1911 1912 if (memcmp(idstr, match, matchlen)) 1913 return false; 1914 1915 for (; matchlen < len; matchlen++) 1916 if (idstr[matchlen] != ' ') 1917 return false; 1918 1919 return true; 1920 } 1921 1922 static bool quirk_matches(const struct nvme_id_ctrl *id, 1923 const struct nvme_core_quirk_entry *q) 1924 { 1925 return q->vid == le16_to_cpu(id->vid) && 1926 string_matches(id->mn, q->mn, sizeof(id->mn)) && 1927 string_matches(id->fr, q->fr, sizeof(id->fr)); 1928 } 1929 1930 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 1931 struct nvme_id_ctrl *id) 1932 { 1933 size_t nqnlen; 1934 int off; 1935 1936 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 1937 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 1938 strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 1939 return; 1940 } 1941 1942 if (ctrl->vs >= NVME_VS(1, 2, 1)) 1943 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 1944 1945 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 1946 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 1947 "nqn.2014.08.org.nvmexpress:%4x%4x", 1948 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 1949 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 1950 off += sizeof(id->sn); 1951 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 1952 off += sizeof(id->mn); 1953 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 1954 } 1955 1956 static void __nvme_release_subsystem(struct nvme_subsystem *subsys) 1957 { 1958 ida_simple_remove(&nvme_subsystems_ida, subsys->instance); 1959 kfree(subsys); 1960 } 1961 1962 static void nvme_release_subsystem(struct device *dev) 1963 { 1964 __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev)); 1965 } 1966 1967 static void nvme_destroy_subsystem(struct kref *ref) 1968 { 1969 struct nvme_subsystem *subsys = 1970 container_of(ref, struct nvme_subsystem, ref); 1971 1972 mutex_lock(&nvme_subsystems_lock); 1973 list_del(&subsys->entry); 1974 mutex_unlock(&nvme_subsystems_lock); 1975 1976 ida_destroy(&subsys->ns_ida); 1977 device_del(&subsys->dev); 1978 put_device(&subsys->dev); 1979 } 1980 1981 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 1982 { 1983 kref_put(&subsys->ref, nvme_destroy_subsystem); 1984 } 1985 1986 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 1987 { 1988 struct nvme_subsystem *subsys; 1989 1990 lockdep_assert_held(&nvme_subsystems_lock); 1991 1992 list_for_each_entry(subsys, &nvme_subsystems, entry) { 1993 if (strcmp(subsys->subnqn, subsysnqn)) 1994 continue; 1995 if (!kref_get_unless_zero(&subsys->ref)) 1996 continue; 1997 return subsys; 1998 } 1999 2000 return NULL; 2001 } 2002 2003 #define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2004 struct device_attribute subsys_attr_##_name = \ 2005 __ATTR(_name, _mode, _show, NULL) 2006 2007 static ssize_t nvme_subsys_show_nqn(struct device *dev, 2008 struct device_attribute *attr, 2009 char *buf) 2010 { 2011 struct nvme_subsystem *subsys = 2012 container_of(dev, struct nvme_subsystem, dev); 2013 2014 return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); 2015 } 2016 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2017 2018 #define nvme_subsys_show_str_function(field) \ 2019 static ssize_t subsys_##field##_show(struct device *dev, \ 2020 struct 
device_attribute *attr, char *buf) \ 2021 { \ 2022 struct nvme_subsystem *subsys = \ 2023 container_of(dev, struct nvme_subsystem, dev); \ 2024 return sprintf(buf, "%.*s\n", \ 2025 (int)sizeof(subsys->field), subsys->field); \ 2026 } \ 2027 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2028 2029 nvme_subsys_show_str_function(model); 2030 nvme_subsys_show_str_function(serial); 2031 nvme_subsys_show_str_function(firmware_rev); 2032 2033 static struct attribute *nvme_subsys_attrs[] = { 2034 &subsys_attr_model.attr, 2035 &subsys_attr_serial.attr, 2036 &subsys_attr_firmware_rev.attr, 2037 &subsys_attr_subsysnqn.attr, 2038 NULL, 2039 }; 2040 2041 static struct attribute_group nvme_subsys_attrs_group = { 2042 .attrs = nvme_subsys_attrs, 2043 }; 2044 2045 static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2046 &nvme_subsys_attrs_group, 2047 NULL, 2048 }; 2049 2050 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2051 { 2052 struct nvme_subsystem *subsys, *found; 2053 int ret; 2054 2055 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2056 if (!subsys) 2057 return -ENOMEM; 2058 ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL); 2059 if (ret < 0) { 2060 kfree(subsys); 2061 return ret; 2062 } 2063 subsys->instance = ret; 2064 mutex_init(&subsys->lock); 2065 kref_init(&subsys->ref); 2066 INIT_LIST_HEAD(&subsys->ctrls); 2067 INIT_LIST_HEAD(&subsys->nsheads); 2068 nvme_init_subnqn(subsys, ctrl, id); 2069 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2070 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2071 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); 2072 subsys->vendor_id = le16_to_cpu(id->vid); 2073 subsys->cmic = id->cmic; 2074 2075 subsys->dev.class = nvme_subsys_class; 2076 subsys->dev.release = nvme_release_subsystem; 2077 subsys->dev.groups = nvme_subsys_attrs_groups; 2078 dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance); 2079 device_initialize(&subsys->dev); 2080 2081 mutex_lock(&nvme_subsystems_lock); 2082 found = __nvme_find_get_subsystem(subsys->subnqn); 2083 if (found) { 2084 /* 2085 * Verify that the subsystem actually supports multiple 2086 * controllers, else bail out. 
2087 */ 2088 if (!(id->cmic & (1 << 1))) { 2089 dev_err(ctrl->device, 2090 "ignoring ctrl due to duplicate subnqn (%s).\n", 2091 found->subnqn); 2092 nvme_put_subsystem(found); 2093 ret = -EINVAL; 2094 goto out_unlock; 2095 } 2096 2097 __nvme_release_subsystem(subsys); 2098 subsys = found; 2099 } else { 2100 ret = device_add(&subsys->dev); 2101 if (ret) { 2102 dev_err(ctrl->device, 2103 "failed to register subsystem device.\n"); 2104 goto out_unlock; 2105 } 2106 ida_init(&subsys->ns_ida); 2107 list_add_tail(&subsys->entry, &nvme_subsystems); 2108 } 2109 2110 ctrl->subsys = subsys; 2111 mutex_unlock(&nvme_subsystems_lock); 2112 2113 if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2114 dev_name(ctrl->device))) { 2115 dev_err(ctrl->device, 2116 "failed to create sysfs link from subsystem.\n"); 2117 /* the transport driver will eventually put the subsystem */ 2118 return -EINVAL; 2119 } 2120 2121 mutex_lock(&subsys->lock); 2122 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2123 mutex_unlock(&subsys->lock); 2124 2125 return 0; 2126 2127 out_unlock: 2128 mutex_unlock(&nvme_subsystems_lock); 2129 put_device(&subsys->dev); 2130 return ret; 2131 } 2132 2133 static int nvme_get_log(struct nvme_ctrl *ctrl, u8 log_page, void *log, 2134 size_t size) 2135 { 2136 struct nvme_command c = { }; 2137 2138 c.common.opcode = nvme_admin_get_log_page; 2139 c.common.nsid = cpu_to_le32(NVME_NSID_ALL); 2140 c.common.cdw10[0] = nvme_get_log_dw10(log_page, size); 2141 2142 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2143 } 2144 2145 static int nvme_get_effects_log(struct nvme_ctrl *ctrl) 2146 { 2147 int ret; 2148 2149 if (!ctrl->effects) 2150 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); 2151 2152 if (!ctrl->effects) 2153 return 0; 2154 2155 ret = nvme_get_log(ctrl, NVME_LOG_CMD_EFFECTS, ctrl->effects, 2156 sizeof(*ctrl->effects)); 2157 if (ret) { 2158 kfree(ctrl->effects); 2159 ctrl->effects = NULL; 2160 } 2161 return ret; 2162 } 2163 2164 /* 2165 * Initialize the cached copies of the Identify data and various controller 2166 * register in our nvme_ctrl structure. This should be called as soon as 2167 * the admin queue is fully up and running. 2168 */ 2169 int nvme_init_identify(struct nvme_ctrl *ctrl) 2170 { 2171 struct nvme_id_ctrl *id; 2172 u64 cap; 2173 int ret, page_shift; 2174 u32 max_hw_sectors; 2175 bool prev_apst_enabled; 2176 2177 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 2178 if (ret) { 2179 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 2180 return ret; 2181 } 2182 2183 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap); 2184 if (ret) { 2185 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2186 return ret; 2187 } 2188 page_shift = NVME_CAP_MPSMIN(cap) + 12; 2189 2190 if (ctrl->vs >= NVME_VS(1, 1, 0)) 2191 ctrl->subsystem = NVME_CAP_NSSRC(cap); 2192 2193 ret = nvme_identify_ctrl(ctrl, &id); 2194 if (ret) { 2195 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 2196 return -EIO; 2197 } 2198 2199 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 2200 ret = nvme_get_effects_log(ctrl); 2201 if (ret < 0) 2202 return ret; 2203 } 2204 2205 if (!ctrl->identified) { 2206 int i; 2207 2208 ret = nvme_init_subsystem(ctrl, id); 2209 if (ret) 2210 goto out_free; 2211 2212 /* 2213 * Check for quirks. Quirk can depend on firmware version, 2214 * so, in principle, the set of quirks present can change 2215 * across a reset. 
As a possible future enhancement, we 2216 * could re-scan for quirks every time we reinitialize 2217 * the device, but we'd have to make sure that the driver 2218 * behaves intelligently if the quirks change. 2219 */ 2220 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 2221 if (quirk_matches(id, &core_quirks[i])) 2222 ctrl->quirks |= core_quirks[i].quirks; 2223 } 2224 } 2225 2226 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 2227 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 2228 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 2229 } 2230 2231 ctrl->oacs = le16_to_cpu(id->oacs); 2232 ctrl->oncs = le16_to_cpup(&id->oncs); 2233 atomic_set(&ctrl->abort_limit, id->acl + 1); 2234 ctrl->vwc = id->vwc; 2235 ctrl->cntlid = le16_to_cpup(&id->cntlid); 2236 if (id->mdts) 2237 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2238 else 2239 max_hw_sectors = UINT_MAX; 2240 ctrl->max_hw_sectors = 2241 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 2242 2243 nvme_set_queue_limits(ctrl, ctrl->admin_q); 2244 ctrl->sgls = le32_to_cpu(id->sgls); 2245 ctrl->kas = le16_to_cpu(id->kas); 2246 2247 if (id->rtd3e) { 2248 /* us -> s */ 2249 u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000; 2250 2251 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 2252 shutdown_timeout, 60); 2253 2254 if (ctrl->shutdown_timeout != shutdown_timeout) 2255 dev_warn(ctrl->device, 2256 "Shutdown timeout set to %u seconds\n", 2257 ctrl->shutdown_timeout); 2258 } else 2259 ctrl->shutdown_timeout = shutdown_timeout; 2260 2261 ctrl->npss = id->npss; 2262 ctrl->apsta = id->apsta; 2263 prev_apst_enabled = ctrl->apst_enabled; 2264 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 2265 if (force_apst && id->apsta) { 2266 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 2267 ctrl->apst_enabled = true; 2268 } else { 2269 ctrl->apst_enabled = false; 2270 } 2271 } else { 2272 ctrl->apst_enabled = id->apsta; 2273 } 2274 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 2275 2276 if (ctrl->ops->flags & NVME_F_FABRICS) { 2277 ctrl->icdoff = le16_to_cpu(id->icdoff); 2278 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 2279 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 2280 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 2281 2282 /* 2283 * In fabrics we need to verify the cntlid matches the 2284 * admin connect 2285 */ 2286 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 2287 ret = -EINVAL; 2288 goto out_free; 2289 } 2290 2291 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { 2292 dev_err(ctrl->device, 2293 "keep-alive support is mandatory for fabrics\n"); 2294 ret = -EINVAL; 2295 goto out_free; 2296 } 2297 } else { 2298 ctrl->cntlid = le16_to_cpu(id->cntlid); 2299 ctrl->hmpre = le32_to_cpu(id->hmpre); 2300 ctrl->hmmin = le32_to_cpu(id->hmmin); 2301 ctrl->hmminds = le32_to_cpu(id->hmminds); 2302 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 2303 } 2304 2305 kfree(id); 2306 2307 if (ctrl->apst_enabled && !prev_apst_enabled) 2308 dev_pm_qos_expose_latency_tolerance(ctrl->device); 2309 else if (!ctrl->apst_enabled && prev_apst_enabled) 2310 dev_pm_qos_hide_latency_tolerance(ctrl->device); 2311 2312 ret = nvme_configure_apst(ctrl); 2313 if (ret < 0) 2314 return ret; 2315 2316 ret = nvme_configure_timestamp(ctrl); 2317 if (ret < 0) 2318 return ret; 2319 2320 ret = nvme_configure_directives(ctrl); 2321 if (ret < 0) 2322 return ret; 2323 2324 ctrl->identified = true; 2325 2326 return 0; 2327 2328 out_free: 2329 kfree(id); 2330 return ret; 
2331 } 2332 EXPORT_SYMBOL_GPL(nvme_init_identify); 2333 2334 static int nvme_dev_open(struct inode *inode, struct file *file) 2335 { 2336 struct nvme_ctrl *ctrl = 2337 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 2338 2339 if (ctrl->state != NVME_CTRL_LIVE) 2340 return -EWOULDBLOCK; 2341 file->private_data = ctrl; 2342 return 0; 2343 } 2344 2345 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) 2346 { 2347 struct nvme_ns *ns; 2348 int ret; 2349 2350 mutex_lock(&ctrl->namespaces_mutex); 2351 if (list_empty(&ctrl->namespaces)) { 2352 ret = -ENOTTY; 2353 goto out_unlock; 2354 } 2355 2356 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); 2357 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { 2358 dev_warn(ctrl->device, 2359 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); 2360 ret = -EINVAL; 2361 goto out_unlock; 2362 } 2363 2364 dev_warn(ctrl->device, 2365 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); 2366 kref_get(&ns->kref); 2367 mutex_unlock(&ctrl->namespaces_mutex); 2368 2369 ret = nvme_user_cmd(ctrl, ns, argp); 2370 nvme_put_ns(ns); 2371 return ret; 2372 2373 out_unlock: 2374 mutex_unlock(&ctrl->namespaces_mutex); 2375 return ret; 2376 } 2377 2378 static long nvme_dev_ioctl(struct file *file, unsigned int cmd, 2379 unsigned long arg) 2380 { 2381 struct nvme_ctrl *ctrl = file->private_data; 2382 void __user *argp = (void __user *)arg; 2383 2384 switch (cmd) { 2385 case NVME_IOCTL_ADMIN_CMD: 2386 return nvme_user_cmd(ctrl, NULL, argp); 2387 case NVME_IOCTL_IO_CMD: 2388 return nvme_dev_user_cmd(ctrl, argp); 2389 case NVME_IOCTL_RESET: 2390 dev_warn(ctrl->device, "resetting controller\n"); 2391 return nvme_reset_ctrl_sync(ctrl); 2392 case NVME_IOCTL_SUBSYS_RESET: 2393 return nvme_reset_subsystem(ctrl); 2394 case NVME_IOCTL_RESCAN: 2395 nvme_queue_scan(ctrl); 2396 return 0; 2397 default: 2398 return -ENOTTY; 2399 } 2400 } 2401 2402 static const struct file_operations nvme_dev_fops = { 2403 .owner = THIS_MODULE, 2404 .open = nvme_dev_open, 2405 .unlocked_ioctl = nvme_dev_ioctl, 2406 .compat_ioctl = nvme_dev_ioctl, 2407 }; 2408 2409 static ssize_t nvme_sysfs_reset(struct device *dev, 2410 struct device_attribute *attr, const char *buf, 2411 size_t count) 2412 { 2413 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2414 int ret; 2415 2416 ret = nvme_reset_ctrl_sync(ctrl); 2417 if (ret < 0) 2418 return ret; 2419 return count; 2420 } 2421 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 2422 2423 static ssize_t nvme_sysfs_rescan(struct device *dev, 2424 struct device_attribute *attr, const char *buf, 2425 size_t count) 2426 { 2427 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2428 2429 nvme_queue_scan(ctrl); 2430 return count; 2431 } 2432 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 2433 2434 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 2435 { 2436 struct gendisk *disk = dev_to_disk(dev); 2437 2438 if (disk->fops == &nvme_fops) 2439 return nvme_get_ns_from_dev(dev)->head; 2440 else 2441 return disk->private_data; 2442 } 2443 2444 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 2445 char *buf) 2446 { 2447 struct nvme_ns_head *head = dev_to_ns_head(dev); 2448 struct nvme_ns_ids *ids = &head->ids; 2449 struct nvme_subsystem *subsys = head->subsys; 2450 int serial_len = sizeof(subsys->serial); 2451 int model_len = sizeof(subsys->model); 2452 2453 if (!uuid_is_null(&ids->uuid)) 2454 return 
sprintf(buf, "uuid.%pU\n", &ids->uuid); 2455 2456 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2457 return sprintf(buf, "eui.%16phN\n", ids->nguid); 2458 2459 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 2460 return sprintf(buf, "eui.%8phN\n", ids->eui64); 2461 2462 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 2463 subsys->serial[serial_len - 1] == '\0')) 2464 serial_len--; 2465 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 2466 subsys->model[model_len - 1] == '\0')) 2467 model_len--; 2468 2469 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 2470 serial_len, subsys->serial, model_len, subsys->model, 2471 head->ns_id); 2472 } 2473 static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL); 2474 2475 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 2476 char *buf) 2477 { 2478 return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 2479 } 2480 static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL); 2481 2482 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 2483 char *buf) 2484 { 2485 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 2486 2487 /* For backward compatibility expose the NGUID to userspace if 2488 * we have no UUID set 2489 */ 2490 if (uuid_is_null(&ids->uuid)) { 2491 printk_ratelimited(KERN_WARNING 2492 "No UUID available providing old NGUID\n"); 2493 return sprintf(buf, "%pU\n", ids->nguid); 2494 } 2495 return sprintf(buf, "%pU\n", &ids->uuid); 2496 } 2497 static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL); 2498 2499 static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 2500 char *buf) 2501 { 2502 return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 2503 } 2504 static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL); 2505 2506 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 2507 char *buf) 2508 { 2509 return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id); 2510 } 2511 static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL); 2512 2513 static struct attribute *nvme_ns_id_attrs[] = { 2514 &dev_attr_wwid.attr, 2515 &dev_attr_uuid.attr, 2516 &dev_attr_nguid.attr, 2517 &dev_attr_eui.attr, 2518 &dev_attr_nsid.attr, 2519 NULL, 2520 }; 2521 2522 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 2523 struct attribute *a, int n) 2524 { 2525 struct device *dev = container_of(kobj, struct device, kobj); 2526 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 2527 2528 if (a == &dev_attr_uuid.attr) { 2529 if (uuid_is_null(&ids->uuid) && 2530 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2531 return 0; 2532 } 2533 if (a == &dev_attr_nguid.attr) { 2534 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2535 return 0; 2536 } 2537 if (a == &dev_attr_eui.attr) { 2538 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 2539 return 0; 2540 } 2541 return a->mode; 2542 } 2543 2544 const struct attribute_group nvme_ns_id_attr_group = { 2545 .attrs = nvme_ns_id_attrs, 2546 .is_visible = nvme_ns_id_attrs_are_visible, 2547 }; 2548 2549 #define nvme_show_str_function(field) \ 2550 static ssize_t field##_show(struct device *dev, \ 2551 struct device_attribute *attr, char *buf) \ 2552 { \ 2553 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 2554 return sprintf(buf, "%.*s\n", \ 2555 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 2556 } \ 2557 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 2558 2559 nvme_show_str_function(model); 2560 nvme_show_str_function(serial); 2561 
nvme_show_str_function(firmware_rev); 2562 2563 #define nvme_show_int_function(field) \ 2564 static ssize_t field##_show(struct device *dev, \ 2565 struct device_attribute *attr, char *buf) \ 2566 { \ 2567 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 2568 return sprintf(buf, "%d\n", ctrl->field); \ 2569 } \ 2570 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 2571 2572 nvme_show_int_function(cntlid); 2573 2574 static ssize_t nvme_sysfs_delete(struct device *dev, 2575 struct device_attribute *attr, const char *buf, 2576 size_t count) 2577 { 2578 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2579 2580 if (device_remove_file_self(dev, attr)) 2581 nvme_delete_ctrl_sync(ctrl); 2582 return count; 2583 } 2584 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 2585 2586 static ssize_t nvme_sysfs_show_transport(struct device *dev, 2587 struct device_attribute *attr, 2588 char *buf) 2589 { 2590 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2591 2592 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); 2593 } 2594 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 2595 2596 static ssize_t nvme_sysfs_show_state(struct device *dev, 2597 struct device_attribute *attr, 2598 char *buf) 2599 { 2600 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2601 static const char *const state_name[] = { 2602 [NVME_CTRL_NEW] = "new", 2603 [NVME_CTRL_LIVE] = "live", 2604 [NVME_CTRL_RESETTING] = "resetting", 2605 [NVME_CTRL_RECONNECTING]= "reconnecting", 2606 [NVME_CTRL_DELETING] = "deleting", 2607 [NVME_CTRL_DEAD] = "dead", 2608 }; 2609 2610 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 2611 state_name[ctrl->state]) 2612 return sprintf(buf, "%s\n", state_name[ctrl->state]); 2613 2614 return sprintf(buf, "unknown state\n"); 2615 } 2616 2617 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 2618 2619 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 2620 struct device_attribute *attr, 2621 char *buf) 2622 { 2623 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2624 2625 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); 2626 } 2627 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 2628 2629 static ssize_t nvme_sysfs_show_address(struct device *dev, 2630 struct device_attribute *attr, 2631 char *buf) 2632 { 2633 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2634 2635 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 2636 } 2637 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 2638 2639 static struct attribute *nvme_dev_attrs[] = { 2640 &dev_attr_reset_controller.attr, 2641 &dev_attr_rescan_controller.attr, 2642 &dev_attr_model.attr, 2643 &dev_attr_serial.attr, 2644 &dev_attr_firmware_rev.attr, 2645 &dev_attr_cntlid.attr, 2646 &dev_attr_delete_controller.attr, 2647 &dev_attr_transport.attr, 2648 &dev_attr_subsysnqn.attr, 2649 &dev_attr_address.attr, 2650 &dev_attr_state.attr, 2651 NULL 2652 }; 2653 2654 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 2655 struct attribute *a, int n) 2656 { 2657 struct device *dev = container_of(kobj, struct device, kobj); 2658 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2659 2660 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 2661 return 0; 2662 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 2663 return 0; 2664 2665 return a->mode; 2666 } 2667 2668 static struct attribute_group nvme_dev_attrs_group = { 2669 .attrs = nvme_dev_attrs, 2670 .is_visible = nvme_dev_attrs_are_visible, 2671 }; 
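/*
 * Example (user space, illustrative only): the controller attributes collected
 * in nvme_dev_attrs_group above appear under the controller's class device,
 * e.g. /sys/class/nvme/nvme0/.  A minimal sketch that triggers a namespace
 * rescan by writing to rescan_controller; the path assumes a controller
 * instance named "nvme0" and the write requires root:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/nvme/nvme0/rescan_controller", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1\n", f);	// any write schedules scan_work
 *		fclose(f);
 *		return 0;
 *	}
 *
 * Writing to reset_controller the same way goes through
 * nvme_reset_ctrl_sync() and returns only once the reset work has finished.
 */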
2672 2673 static const struct attribute_group *nvme_dev_attr_groups[] = { 2674 &nvme_dev_attrs_group, 2675 NULL, 2676 }; 2677 2678 static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys, 2679 unsigned nsid) 2680 { 2681 struct nvme_ns_head *h; 2682 2683 lockdep_assert_held(&subsys->lock); 2684 2685 list_for_each_entry(h, &subsys->nsheads, entry) { 2686 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref)) 2687 return h; 2688 } 2689 2690 return NULL; 2691 } 2692 2693 static int __nvme_check_ids(struct nvme_subsystem *subsys, 2694 struct nvme_ns_head *new) 2695 { 2696 struct nvme_ns_head *h; 2697 2698 lockdep_assert_held(&subsys->lock); 2699 2700 list_for_each_entry(h, &subsys->nsheads, entry) { 2701 if (nvme_ns_ids_valid(&new->ids) && 2702 nvme_ns_ids_equal(&new->ids, &h->ids)) 2703 return -EINVAL; 2704 } 2705 2706 return 0; 2707 } 2708 2709 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 2710 unsigned nsid, struct nvme_id_ns *id) 2711 { 2712 struct nvme_ns_head *head; 2713 int ret = -ENOMEM; 2714 2715 head = kzalloc(sizeof(*head), GFP_KERNEL); 2716 if (!head) 2717 goto out; 2718 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); 2719 if (ret < 0) 2720 goto out_free_head; 2721 head->instance = ret; 2722 INIT_LIST_HEAD(&head->list); 2723 init_srcu_struct(&head->srcu); 2724 head->subsys = ctrl->subsys; 2725 head->ns_id = nsid; 2726 kref_init(&head->ref); 2727 2728 nvme_report_ns_ids(ctrl, nsid, id, &head->ids); 2729 2730 ret = __nvme_check_ids(ctrl->subsys, head); 2731 if (ret) { 2732 dev_err(ctrl->device, 2733 "duplicate IDs for nsid %d\n", nsid); 2734 goto out_cleanup_srcu; 2735 } 2736 2737 ret = nvme_mpath_alloc_disk(ctrl, head); 2738 if (ret) 2739 goto out_cleanup_srcu; 2740 2741 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 2742 return head; 2743 out_cleanup_srcu: 2744 cleanup_srcu_struct(&head->srcu); 2745 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); 2746 out_free_head: 2747 kfree(head); 2748 out: 2749 return ERR_PTR(ret); 2750 } 2751 2752 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, 2753 struct nvme_id_ns *id, bool *new) 2754 { 2755 struct nvme_ctrl *ctrl = ns->ctrl; 2756 bool is_shared = id->nmic & (1 << 0); 2757 struct nvme_ns_head *head = NULL; 2758 int ret = 0; 2759 2760 mutex_lock(&ctrl->subsys->lock); 2761 if (is_shared) 2762 head = __nvme_find_ns_head(ctrl->subsys, nsid); 2763 if (!head) { 2764 head = nvme_alloc_ns_head(ctrl, nsid, id); 2765 if (IS_ERR(head)) { 2766 ret = PTR_ERR(head); 2767 goto out_unlock; 2768 } 2769 2770 *new = true; 2771 } else { 2772 struct nvme_ns_ids ids; 2773 2774 nvme_report_ns_ids(ctrl, nsid, id, &ids); 2775 if (!nvme_ns_ids_equal(&head->ids, &ids)) { 2776 dev_err(ctrl->device, 2777 "IDs don't match for shared namespace %d\n", 2778 nsid); 2779 ret = -EINVAL; 2780 goto out_unlock; 2781 } 2782 2783 *new = false; 2784 } 2785 2786 list_add_tail(&ns->siblings, &head->list); 2787 ns->head = head; 2788 2789 out_unlock: 2790 mutex_unlock(&ctrl->subsys->lock); 2791 return ret; 2792 } 2793 2794 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) 2795 { 2796 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); 2797 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); 2798 2799 return nsa->head->ns_id - nsb->head->ns_id; 2800 } 2801 2802 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 2803 { 2804 struct nvme_ns *ns, *ret = NULL; 2805 2806 mutex_lock(&ctrl->namespaces_mutex); 2807 
list_for_each_entry(ns, &ctrl->namespaces, list) { 2808 if (ns->head->ns_id == nsid) { 2809 if (!kref_get_unless_zero(&ns->kref)) 2810 continue; 2811 ret = ns; 2812 break; 2813 } 2814 if (ns->head->ns_id > nsid) 2815 break; 2816 } 2817 mutex_unlock(&ctrl->namespaces_mutex); 2818 return ret; 2819 } 2820 2821 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns) 2822 { 2823 struct streams_directive_params s; 2824 int ret; 2825 2826 if (!ctrl->nr_streams) 2827 return 0; 2828 2829 ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); 2830 if (ret) 2831 return ret; 2832 2833 ns->sws = le32_to_cpu(s.sws); 2834 ns->sgs = le16_to_cpu(s.sgs); 2835 2836 if (ns->sws) { 2837 unsigned int bs = 1 << ns->lba_shift; 2838 2839 blk_queue_io_min(ns->queue, bs * ns->sws); 2840 if (ns->sgs) 2841 blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs); 2842 } 2843 2844 return 0; 2845 } 2846 2847 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) 2848 { 2849 struct nvme_ns *ns; 2850 struct gendisk *disk; 2851 struct nvme_id_ns *id; 2852 char disk_name[DISK_NAME_LEN]; 2853 int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; 2854 bool new = true; 2855 2856 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 2857 if (!ns) 2858 return; 2859 2860 ns->queue = blk_mq_init_queue(ctrl->tagset); 2861 if (IS_ERR(ns->queue)) 2862 goto out_free_ns; 2863 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 2864 ns->queue->queuedata = ns; 2865 ns->ctrl = ctrl; 2866 2867 kref_init(&ns->kref); 2868 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ 2869 2870 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 2871 nvme_set_queue_limits(ctrl, ns->queue); 2872 nvme_setup_streams_ns(ctrl, ns); 2873 2874 id = nvme_identify_ns(ctrl, nsid); 2875 if (!id) 2876 goto out_free_queue; 2877 2878 if (id->ncap == 0) 2879 goto out_free_id; 2880 2881 if (nvme_init_ns_head(ns, nsid, id, &new)) 2882 goto out_free_id; 2883 2884 #ifdef CONFIG_NVME_MULTIPATH 2885 /* 2886 * If multipathing is enabled we need to always use the subsystem 2887 * instance number for numbering our devices to avoid conflicts 2888 * between subsystems that have multiple controllers and thus use 2889 * the multipath-aware subsystem node and those that have a single 2890 * controller and use the controller node directly. 2891 */ 2892 if (ns->head->disk) { 2893 sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, 2894 ctrl->cntlid, ns->head->instance); 2895 flags = GENHD_FL_HIDDEN; 2896 } else { 2897 sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance, 2898 ns->head->instance); 2899 } 2900 #else 2901 /* 2902 * But without the multipath code enabled, multiple controller per 2903 * subsystems are visible as devices and thus we cannot use the 2904 * subsystem instance. 
2905 */ 2906 sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); 2907 #endif 2908 2909 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { 2910 if (nvme_nvm_register(ns, disk_name, node)) { 2911 dev_warn(ctrl->device, "LightNVM init failure\n"); 2912 goto out_unlink_ns; 2913 } 2914 } 2915 2916 disk = alloc_disk_node(0, node); 2917 if (!disk) 2918 goto out_unlink_ns; 2919 2920 disk->fops = &nvme_fops; 2921 disk->private_data = ns; 2922 disk->queue = ns->queue; 2923 disk->flags = flags; 2924 memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); 2925 ns->disk = disk; 2926 2927 __nvme_revalidate_disk(disk, id); 2928 2929 mutex_lock(&ctrl->namespaces_mutex); 2930 list_add_tail(&ns->list, &ctrl->namespaces); 2931 mutex_unlock(&ctrl->namespaces_mutex); 2932 2933 nvme_get_ctrl(ctrl); 2934 2935 kfree(id); 2936 2937 device_add_disk(ctrl->device, ns->disk); 2938 if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj, 2939 &nvme_ns_id_attr_group)) 2940 pr_warn("%s: failed to create sysfs group for identification\n", 2941 ns->disk->disk_name); 2942 if (ns->ndev && nvme_nvm_register_sysfs(ns)) 2943 pr_warn("%s: failed to register lightnvm sysfs group for identification\n", 2944 ns->disk->disk_name); 2945 2946 if (new) 2947 nvme_mpath_add_disk(ns->head); 2948 nvme_mpath_add_disk_links(ns); 2949 return; 2950 out_unlink_ns: 2951 mutex_lock(&ctrl->subsys->lock); 2952 list_del_rcu(&ns->siblings); 2953 mutex_unlock(&ctrl->subsys->lock); 2954 out_free_id: 2955 kfree(id); 2956 out_free_queue: 2957 blk_cleanup_queue(ns->queue); 2958 out_free_ns: 2959 kfree(ns); 2960 } 2961 2962 static void nvme_ns_remove(struct nvme_ns *ns) 2963 { 2964 struct nvme_ns_head *head = ns->head; 2965 2966 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 2967 return; 2968 2969 if (ns->disk && ns->disk->flags & GENHD_FL_UP) { 2970 if (blk_get_integrity(ns->disk)) 2971 blk_integrity_unregister(ns->disk); 2972 nvme_mpath_remove_disk_links(ns); 2973 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, 2974 &nvme_ns_id_attr_group); 2975 if (ns->ndev) 2976 nvme_nvm_unregister_sysfs(ns); 2977 del_gendisk(ns->disk); 2978 blk_cleanup_queue(ns->queue); 2979 } 2980 2981 mutex_lock(&ns->ctrl->subsys->lock); 2982 nvme_mpath_clear_current_path(ns); 2983 if (head) 2984 list_del_rcu(&ns->siblings); 2985 mutex_unlock(&ns->ctrl->subsys->lock); 2986 2987 mutex_lock(&ns->ctrl->namespaces_mutex); 2988 list_del_init(&ns->list); 2989 mutex_unlock(&ns->ctrl->namespaces_mutex); 2990 2991 synchronize_srcu(&head->srcu); 2992 nvme_put_ns(ns); 2993 } 2994 2995 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) 2996 { 2997 struct nvme_ns *ns; 2998 2999 ns = nvme_find_get_ns(ctrl, nsid); 3000 if (ns) { 3001 if (ns->disk && revalidate_disk(ns->disk)) 3002 nvme_ns_remove(ns); 3003 nvme_put_ns(ns); 3004 } else 3005 nvme_alloc_ns(ctrl, nsid); 3006 } 3007 3008 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3009 unsigned nsid) 3010 { 3011 struct nvme_ns *ns, *next; 3012 3013 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3014 if (ns->head->ns_id > nsid) 3015 nvme_ns_remove(ns); 3016 } 3017 } 3018 3019 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) 3020 { 3021 struct nvme_ns *ns; 3022 __le32 *ns_list; 3023 unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); 3024 int ret = 0; 3025 3026 ns_list = kzalloc(0x1000, GFP_KERNEL); 3027 if (!ns_list) 3028 return -ENOMEM; 3029 3030 for (i = 0; i < num_lists; i++) { 3031 ret = nvme_identify_ns_list(ctrl, prev, ns_list); 3032 if 
(ret) 3033 goto free; 3034 3035 for (j = 0; j < min(nn, 1024U); j++) { 3036 nsid = le32_to_cpu(ns_list[j]); 3037 if (!nsid) 3038 goto out; 3039 3040 nvme_validate_ns(ctrl, nsid); 3041 3042 while (++prev < nsid) { 3043 ns = nvme_find_get_ns(ctrl, prev); 3044 if (ns) { 3045 nvme_ns_remove(ns); 3046 nvme_put_ns(ns); 3047 } 3048 } 3049 } 3050 nn -= j; 3051 } 3052 out: 3053 nvme_remove_invalid_namespaces(ctrl, prev); 3054 free: 3055 kfree(ns_list); 3056 return ret; 3057 } 3058 3059 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) 3060 { 3061 unsigned i; 3062 3063 for (i = 1; i <= nn; i++) 3064 nvme_validate_ns(ctrl, i); 3065 3066 nvme_remove_invalid_namespaces(ctrl, nn); 3067 } 3068 3069 static void nvme_scan_work(struct work_struct *work) 3070 { 3071 struct nvme_ctrl *ctrl = 3072 container_of(work, struct nvme_ctrl, scan_work); 3073 struct nvme_id_ctrl *id; 3074 unsigned nn; 3075 3076 if (ctrl->state != NVME_CTRL_LIVE) 3077 return; 3078 3079 if (nvme_identify_ctrl(ctrl, &id)) 3080 return; 3081 3082 nn = le32_to_cpu(id->nn); 3083 if (ctrl->vs >= NVME_VS(1, 1, 0) && 3084 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { 3085 if (!nvme_scan_ns_list(ctrl, nn)) 3086 goto done; 3087 } 3088 nvme_scan_ns_sequential(ctrl, nn); 3089 done: 3090 mutex_lock(&ctrl->namespaces_mutex); 3091 list_sort(NULL, &ctrl->namespaces, ns_cmp); 3092 mutex_unlock(&ctrl->namespaces_mutex); 3093 kfree(id); 3094 } 3095 3096 void nvme_queue_scan(struct nvme_ctrl *ctrl) 3097 { 3098 /* 3099 * Do not queue new scan work when a controller is reset during 3100 * removal. 3101 */ 3102 if (ctrl->state == NVME_CTRL_LIVE) 3103 queue_work(nvme_wq, &ctrl->scan_work); 3104 } 3105 EXPORT_SYMBOL_GPL(nvme_queue_scan); 3106 3107 /* 3108 * This function iterates the namespace list unlocked to allow recovery from 3109 * controller failure. It is up to the caller to ensure the namespace list is 3110 * not modified by scan work while this function is executing. 3111 */ 3112 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 3113 { 3114 struct nvme_ns *ns, *next; 3115 3116 /* 3117 * The dead state indicates the controller was not gracefully 3118 * disconnected. In that case, we won't be able to flush any data while 3119 * removing the namespaces' disks; fail all the queues now to avoid 3120 * potentially having to clean up the failed sync later.
3121 */ 3122 if (ctrl->state == NVME_CTRL_DEAD) 3123 nvme_kill_queues(ctrl); 3124 3125 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) 3126 nvme_ns_remove(ns); 3127 } 3128 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 3129 3130 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 3131 { 3132 char *envp[2] = { NULL, NULL }; 3133 u32 aen_result = ctrl->aen_result; 3134 3135 ctrl->aen_result = 0; 3136 if (!aen_result) 3137 return; 3138 3139 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 3140 if (!envp[0]) 3141 return; 3142 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 3143 kfree(envp[0]); 3144 } 3145 3146 static void nvme_async_event_work(struct work_struct *work) 3147 { 3148 struct nvme_ctrl *ctrl = 3149 container_of(work, struct nvme_ctrl, async_event_work); 3150 3151 nvme_aen_uevent(ctrl); 3152 ctrl->ops->submit_async_event(ctrl); 3153 } 3154 3155 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 3156 { 3157 3158 u32 csts; 3159 3160 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 3161 return false; 3162 3163 if (csts == ~0) 3164 return false; 3165 3166 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 3167 } 3168 3169 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 3170 { 3171 struct nvme_fw_slot_info_log *log; 3172 3173 log = kmalloc(sizeof(*log), GFP_KERNEL); 3174 if (!log) 3175 return; 3176 3177 if (nvme_get_log(ctrl, NVME_LOG_FW_SLOT, log, sizeof(*log))) 3178 dev_warn(ctrl->device, 3179 "Get FW SLOT INFO log error\n"); 3180 kfree(log); 3181 } 3182 3183 static void nvme_fw_act_work(struct work_struct *work) 3184 { 3185 struct nvme_ctrl *ctrl = container_of(work, 3186 struct nvme_ctrl, fw_act_work); 3187 unsigned long fw_act_timeout; 3188 3189 if (ctrl->mtfa) 3190 fw_act_timeout = jiffies + 3191 msecs_to_jiffies(ctrl->mtfa * 100); 3192 else 3193 fw_act_timeout = jiffies + 3194 msecs_to_jiffies(admin_timeout * 1000); 3195 3196 nvme_stop_queues(ctrl); 3197 while (nvme_ctrl_pp_status(ctrl)) { 3198 if (time_after(jiffies, fw_act_timeout)) { 3199 dev_warn(ctrl->device, 3200 "Fw activation timeout, reset controller\n"); 3201 nvme_reset_ctrl(ctrl); 3202 break; 3203 } 3204 msleep(100); 3205 } 3206 3207 if (ctrl->state != NVME_CTRL_LIVE) 3208 return; 3209 3210 nvme_start_queues(ctrl); 3211 /* read FW slot information to clear the AER */ 3212 nvme_get_fw_slot_info(ctrl); 3213 } 3214 3215 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 3216 union nvme_result *res) 3217 { 3218 u32 result = le32_to_cpu(res->u32); 3219 3220 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 3221 return; 3222 3223 switch (result & 0x7) { 3224 case NVME_AER_ERROR: 3225 case NVME_AER_SMART: 3226 case NVME_AER_CSS: 3227 case NVME_AER_VS: 3228 ctrl->aen_result = result; 3229 break; 3230 default: 3231 break; 3232 } 3233 3234 switch (result & 0xff07) { 3235 case NVME_AER_NOTICE_NS_CHANGED: 3236 dev_info(ctrl->device, "rescanning\n"); 3237 nvme_queue_scan(ctrl); 3238 break; 3239 case NVME_AER_NOTICE_FW_ACT_STARTING: 3240 queue_work(nvme_wq, &ctrl->fw_act_work); 3241 break; 3242 default: 3243 dev_warn(ctrl->device, "async event result %08x\n", result); 3244 } 3245 queue_work(nvme_wq, &ctrl->async_event_work); 3246 } 3247 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 3248 3249 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 3250 { 3251 nvme_stop_keep_alive(ctrl); 3252 flush_work(&ctrl->async_event_work); 3253 flush_work(&ctrl->scan_work); 3254 cancel_work_sync(&ctrl->fw_act_work); 3255 } 3256 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 
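/*
 * Example (user space, illustrative only): nvme_aen_uevent() above forwards
 * the raw Asynchronous Event Notification result to user space as an
 * "NVME_AEN=0x..." property on a KOBJ_CHANGE uevent of the controller device.
 * A minimal libudev listener sketch; it assumes libudev is installed (link
 * with -ludev) and that a udev daemon is relaying kernel uevents:
 *
 *	#include <libudev.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct udev *udev = udev_new();
 *		struct udev_monitor *mon;
 *		struct pollfd pfd;
 *
 *		if (!udev)
 *			return 1;
 *		mon = udev_monitor_new_from_netlink(udev, "udev");
 *		if (!mon)
 *			return 1;
 *		udev_monitor_filter_add_match_subsystem_devtype(mon, "nvme", NULL);
 *		udev_monitor_enable_receiving(mon);
 *		pfd.fd = udev_monitor_get_fd(mon);
 *		pfd.events = POLLIN;
 *
 *		for (;;) {
 *			struct udev_device *dev;
 *			const char *aen;
 *
 *			if (poll(&pfd, 1, -1) <= 0)
 *				continue;
 *			dev = udev_monitor_receive_device(mon);
 *			if (!dev)
 *				continue;
 *			aen = udev_device_get_property_value(dev, "NVME_AEN");
 *			if (aen)
 *				printf("AEN result: %s\n", aen);
 *			udev_device_unref(dev);
 *		}
 *	}
 */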
3257 3258 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 3259 { 3260 if (ctrl->kato) 3261 nvme_start_keep_alive(ctrl); 3262 3263 if (ctrl->queue_count > 1) { 3264 nvme_queue_scan(ctrl); 3265 queue_work(nvme_wq, &ctrl->async_event_work); 3266 nvme_start_queues(ctrl); 3267 } 3268 } 3269 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 3270 3271 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 3272 { 3273 cdev_device_del(&ctrl->cdev, ctrl->device); 3274 } 3275 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 3276 3277 static void nvme_free_ctrl(struct device *dev) 3278 { 3279 struct nvme_ctrl *ctrl = 3280 container_of(dev, struct nvme_ctrl, ctrl_device); 3281 struct nvme_subsystem *subsys = ctrl->subsys; 3282 3283 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3284 kfree(ctrl->effects); 3285 3286 if (subsys) { 3287 mutex_lock(&subsys->lock); 3288 list_del(&ctrl->subsys_entry); 3289 mutex_unlock(&subsys->lock); 3290 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 3291 } 3292 3293 ctrl->ops->free_ctrl(ctrl); 3294 3295 if (subsys) 3296 nvme_put_subsystem(subsys); 3297 } 3298 3299 /* 3300 * Initialize an NVMe controller structure. This needs to be called during 3301 * earliest initialization so that we have the initialized structure around 3302 * during probing. 3303 */ 3304 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 3305 const struct nvme_ctrl_ops *ops, unsigned long quirks) 3306 { 3307 int ret; 3308 3309 ctrl->state = NVME_CTRL_NEW; 3310 spin_lock_init(&ctrl->lock); 3311 INIT_LIST_HEAD(&ctrl->namespaces); 3312 mutex_init(&ctrl->namespaces_mutex); 3313 ctrl->dev = dev; 3314 ctrl->ops = ops; 3315 ctrl->quirks = quirks; 3316 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 3317 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 3318 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 3319 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 3320 3321 ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); 3322 if (ret < 0) 3323 goto out; 3324 ctrl->instance = ret; 3325 3326 device_initialize(&ctrl->ctrl_device); 3327 ctrl->device = &ctrl->ctrl_device; 3328 ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance); 3329 ctrl->device->class = nvme_class; 3330 ctrl->device->parent = ctrl->dev; 3331 ctrl->device->groups = nvme_dev_attr_groups; 3332 ctrl->device->release = nvme_free_ctrl; 3333 dev_set_drvdata(ctrl->device, ctrl); 3334 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 3335 if (ret) 3336 goto out_release_instance; 3337 3338 cdev_init(&ctrl->cdev, &nvme_dev_fops); 3339 ctrl->cdev.owner = ops->module; 3340 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 3341 if (ret) 3342 goto out_free_name; 3343 3344 /* 3345 * Initialize latency tolerance controls. The sysfs files won't 3346 * be visible to userspace unless the device actually supports APST. 3347 */ 3348 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 3349 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 3350 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 3351 3352 return 0; 3353 out_free_name: 3354 kfree_const(dev->kobj.name); 3355 out_release_instance: 3356 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3357 out: 3358 return ret; 3359 } 3360 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 3361 3362 /** 3363 * nvme_kill_queues(): Ends all namespace queues 3364 * @ctrl: the dead controller that needs to end 3365 * 3366 * Call this function when the driver determines it is unable to get the 3367 * controller in a state capable of servicing IO.
3368 */ 3369 void nvme_kill_queues(struct nvme_ctrl *ctrl) 3370 { 3371 struct nvme_ns *ns; 3372 3373 mutex_lock(&ctrl->namespaces_mutex); 3374 3375 /* Forcibly unquiesce queues to avoid blocking dispatch */ 3376 if (ctrl->admin_q) 3377 blk_mq_unquiesce_queue(ctrl->admin_q); 3378 3379 list_for_each_entry(ns, &ctrl->namespaces, list) { 3380 /* 3381 * Revalidating a dead namespace sets capacity to 0. This will 3382 * end buffered writers dirtying pages that can't be synced. 3383 */ 3384 if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) 3385 continue; 3386 revalidate_disk(ns->disk); 3387 blk_set_queue_dying(ns->queue); 3388 3389 /* Forcibly unquiesce queues to avoid blocking dispatch */ 3390 blk_mq_unquiesce_queue(ns->queue); 3391 } 3392 mutex_unlock(&ctrl->namespaces_mutex); 3393 } 3394 EXPORT_SYMBOL_GPL(nvme_kill_queues); 3395 3396 void nvme_unfreeze(struct nvme_ctrl *ctrl) 3397 { 3398 struct nvme_ns *ns; 3399 3400 mutex_lock(&ctrl->namespaces_mutex); 3401 list_for_each_entry(ns, &ctrl->namespaces, list) 3402 blk_mq_unfreeze_queue(ns->queue); 3403 mutex_unlock(&ctrl->namespaces_mutex); 3404 } 3405 EXPORT_SYMBOL_GPL(nvme_unfreeze); 3406 3407 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 3408 { 3409 struct nvme_ns *ns; 3410 3411 mutex_lock(&ctrl->namespaces_mutex); 3412 list_for_each_entry(ns, &ctrl->namespaces, list) { 3413 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 3414 if (timeout <= 0) 3415 break; 3416 } 3417 mutex_unlock(&ctrl->namespaces_mutex); 3418 } 3419 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 3420 3421 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 3422 { 3423 struct nvme_ns *ns; 3424 3425 mutex_lock(&ctrl->namespaces_mutex); 3426 list_for_each_entry(ns, &ctrl->namespaces, list) 3427 blk_mq_freeze_queue_wait(ns->queue); 3428 mutex_unlock(&ctrl->namespaces_mutex); 3429 } 3430 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 3431 3432 void nvme_start_freeze(struct nvme_ctrl *ctrl) 3433 { 3434 struct nvme_ns *ns; 3435 3436 mutex_lock(&ctrl->namespaces_mutex); 3437 list_for_each_entry(ns, &ctrl->namespaces, list) 3438 blk_freeze_queue_start(ns->queue); 3439 mutex_unlock(&ctrl->namespaces_mutex); 3440 } 3441 EXPORT_SYMBOL_GPL(nvme_start_freeze); 3442 3443 void nvme_stop_queues(struct nvme_ctrl *ctrl) 3444 { 3445 struct nvme_ns *ns; 3446 3447 mutex_lock(&ctrl->namespaces_mutex); 3448 list_for_each_entry(ns, &ctrl->namespaces, list) 3449 blk_mq_quiesce_queue(ns->queue); 3450 mutex_unlock(&ctrl->namespaces_mutex); 3451 } 3452 EXPORT_SYMBOL_GPL(nvme_stop_queues); 3453 3454 void nvme_start_queues(struct nvme_ctrl *ctrl) 3455 { 3456 struct nvme_ns *ns; 3457 3458 mutex_lock(&ctrl->namespaces_mutex); 3459 list_for_each_entry(ns, &ctrl->namespaces, list) 3460 blk_mq_unquiesce_queue(ns->queue); 3461 mutex_unlock(&ctrl->namespaces_mutex); 3462 } 3463 EXPORT_SYMBOL_GPL(nvme_start_queues); 3464 3465 int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set) 3466 { 3467 if (!ctrl->ops->reinit_request) 3468 return 0; 3469 3470 return blk_mq_tagset_iter(set, set->driver_data, 3471 ctrl->ops->reinit_request); 3472 } 3473 EXPORT_SYMBOL_GPL(nvme_reinit_tagset); 3474 3475 int __init nvme_core_init(void) 3476 { 3477 int result; 3478 3479 nvme_wq = alloc_workqueue("nvme-wq", 3480 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 3481 if (!nvme_wq) 3482 return -ENOMEM; 3483 3484 result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme"); 3485 if (result < 0) 3486 goto destroy_wq; 3487 3488 nvme_class = class_create(THIS_MODULE, "nvme"); 3489 if 
(IS_ERR(nvme_class)) { 3490 result = PTR_ERR(nvme_class); 3491 goto unregister_chrdev; 3492 } 3493 3494 nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem"); 3495 if (IS_ERR(nvme_subsys_class)) { 3496 result = PTR_ERR(nvme_subsys_class); 3497 goto destroy_class; 3498 } 3499 return 0; 3500 3501 destroy_class: 3502 class_destroy(nvme_class); 3503 unregister_chrdev: 3504 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 3505 destroy_wq: 3506 destroy_workqueue(nvme_wq); 3507 return result; 3508 } 3509 3510 void nvme_core_exit(void) 3511 { 3512 ida_destroy(&nvme_subsystems_ida); 3513 class_destroy(nvme_subsys_class); 3514 class_destroy(nvme_class); 3515 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 3516 destroy_workqueue(nvme_wq); 3517 } 3518 3519 MODULE_LICENSE("GPL"); 3520 MODULE_VERSION("1.0"); 3521 module_init(nvme_core_init); 3522 module_exit(nvme_core_exit); 3523
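/*
 * Example (user space, illustrative only): the per-controller character
 * device registered by this driver, /dev/nvme<instance>, accepts admin
 * command passthrough via NVME_IOCTL_ADMIN_CMD.  A minimal sketch that
 * issues Identify Controller (CNS 01h) and prints the model number; it
 * assumes a controller at /dev/nvme0 and needs CAP_SYS_ADMIN:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/nvme_ioctl.h>
 *
 *	int main(void)
 *	{
 *		unsigned char data[4096];
 *		struct nvme_admin_cmd cmd;
 *		char model[41];
 *		int fd = open("/dev/nvme0", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&cmd, 0, sizeof(cmd));
 *		cmd.opcode = 0x06;			// Identify
 *		cmd.addr = (unsigned long)data;
 *		cmd.data_len = sizeof(data);
 *		cmd.cdw10 = 1;				// CNS 01h: controller data
 *		if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
 *			return 1;
 *		memcpy(model, data + 24, 40);		// MN is at bytes 24..63
 *		model[40] = '\0';
 *		printf("model: %s\n", model);
 *		close(fd);
 *		return 0;
 *	}
 */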