/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

unsigned int nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, uint, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
EXPORT_SYMBOL_GPL(nvme_max_retries);

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static unsigned long default_ps_max_latency_us = 25000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	blk_mq_complete_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irq(&ctrl->lock);

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
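
/*
 * Illustration (not part of the driver): nvme_change_ctrl_state() returns
 * whether the requested transition was legal, so transport drivers can use
 * it as a gate.  A reset path might look roughly like the sketch below,
 * assuming a hypothetical transport with its own reset work item:
 *
 *	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 *		return -EBUSY;
 *	queue_work(my_reset_wq, &my_ctrl->reset_work);
 *
 * Per the switch above, only NEW, LIVE and RECONNECTING controllers may
 * enter RESETTING; from any other starting state the call fails and
 * ctrl->state is left untouched.
 */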

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	if (ns->disk) {
		spin_lock(&dev_list_lock);
		ns->disk->private_data = NULL;
		spin_unlock(&dev_list_lock);
	}

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

void nvme_requeue_req(struct request *req)
{
	blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		range[n].cattr = cpu_to_le32(0);
		range[n].nlb = cpu_to_le32(nlb);
		range[n].slba = cpu_to_le64(slba);
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_MQ_RQ_QUEUE_OK;
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}
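
/*
 * Worked example (illustrative): for a namespace formatted with 4KiB LBAs
 * (ns->lba_shift == 12), a read of 16KiB starting at block layer sector 80
 * is translated by nvme_setup_rw() as follows.  nvme_block_nr() converts
 * 512-byte sectors to device LBAs by shifting right by (lba_shift - 9):
 *
 *	slba   = 80 >> (12 - 9)    = 10
 *	length = (16384 >> 12) - 1 = 3	(NLB is a 0's based count,
 *					 i.e. four 4KiB blocks)
 *
 * The same 0's based convention is used for the DSM range count above
 * (cmnd->dsm.nr = segments - 1).
 */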

int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	int ret = BLK_MQ_RQ_QUEUE_OK;

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	cmd->common.command_id = req->tag;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
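
/*
 * Illustration (not part of the driver): because of the convention above,
 * callers of nvme_submit_sync_cmd() typically distinguish transport errors
 * from NVMe status codes.  A hypothetical caller might do, roughly:
 *
 *	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, buf, sizeof(buf));
 *	if (ret < 0)
 *		return ret;	(a Linux errno, e.g. -ENOMEM)
 *	if (ret > 0)
 *		return -EIO;	(an NVMe status code from the device)
 *
 * nvme_get_features()/nvme_set_features() below follow the same pattern
 * with their "ret >= 0 && result" checks.
 */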

int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}

static void nvme_keep_alive_end_io(struct request *rq, int error)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (error) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n", error);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		ctrl->ops->reset_ctrl(ctrl);
		return;
	}
}

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
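
/*
 * Worked example (illustrative): with a Keep Alive Timeout of 5 seconds
 * (ctrl->kato == 5), nvme_start_keep_alive() arms ka_work 5 * HZ jiffies in
 * the future, and each successful completion re-arms it from
 * nvme_keep_alive_end_io(), so one Keep Alive admin command is issued
 * roughly every 5 seconds; the command itself also gets a 5 * HZ request
 * timeout.  A fabrics controller that sees no traffic for longer than KATO
 * may assume the host is gone, which is why nvme_keep_alive_work() escalates
 * a failed submission to a controller reset.
 */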

int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			 NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
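
/*
 * Worked example (illustrative): the Number of Queues feature is 0's based
 * in both directions.  Asking for 8 I/O queues (*count == 8) encodes
 * q_count = 0x00070007 (both halves set to 7).  If the controller replies
 * with result == 0x000f0003, it granted 4 submission queues and 16
 * completion queues, so:
 *
 *	nr_io_queues = min(0x0003, 0x000f) + 1 = 4
 *	*count       = min(8, 4)              = 4
 *
 * i.e. the driver only ever uses as many queue pairs as the smaller of the
 * two grants allows.
 */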

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}
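
/*
 * Worked example (illustrative): NVME_IOCTL_SUBMIT_IO also uses 0's based
 * block counts.  For a namespace with 512-byte LBAs (lba_shift == 9) and
 * 8 bytes of separate metadata per LBA (ns->ms == 8, ns->ext == 0), a
 * request with io.nblocks == 7 transfers
 *
 *	length   = (7 + 1) << 9  = 4096 bytes of data
 *	meta_len = (7 + 1) * 8   = 64 bytes of metadata
 *
 * and io.metadata must be a non-NULL, 4-byte aligned user pointer.  With an
 * extended-LBA format (ns->ext != 0) the metadata is carried inline, so the
 * 64 bytes are added to length and meta_len is cleared instead.
 */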

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
#ifdef CONFIG_NVM
		if (ns->ndev)
			return nvme_nvm_ioctl(ns, cmd, arg);
#endif
		if (is_sed_ioctl(cmd))
			return sed_ioctl(ns->ctrl->opal_dev, cmd,
					 (void __user *) arg);
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
		ns->queue->limits.discard_zeroes_data = 1;
	else
		ns->queue->limits.discard_zeroes_data = 0;

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
{
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
		dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
		return -ENODEV;
	}

	if ((*id)->ncap == 0) {
		kfree(*id);
		return -ENODEV;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));

	return 0;
}

static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use a default 512 byte block size so
	 * the block layer can use it before failing reads/writes for the
	 * zero-capacity device.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
					ns->ms != old_ms ||
					bs != queue_logical_block_size(disk->queue) ||
					(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id = NULL;
	int ret;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	ret = nvme_revalidate_ns(ns, &id);
	if (ret)
		return ret;

	__nvme_revalidate_disk(disk, id);
	kfree(id);

	return 0;
}
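
/*
 * Worked example (illustrative): set_capacity() takes 512-byte sectors, so
 * for a namespace formatted with 4KiB LBAs (lba_shift == 12) and
 * id->nsze == 1048576 LBAs, __nvme_revalidate_disk() registers
 *
 *	capacity = 1048576 << (12 - 9) = 8388608 sectors  (4 GiB)
 *
 * If the LBA format carries metadata the driver cannot handle (ns->ms set,
 * no blk_integrity profile registered, and not the 8-byte T10 PI case), the
 * capacity is forced to 0 so the block layer rejects all I/O instead.
 */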

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
};

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
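
/*
 * Worked example (illustrative): for the Reservation Acquire/Release
 * commands built above, cdw10 packs the action in bits 2:0, the
 * "ignore existing key" flag in bit 3 and the reservation type in bits
 * 15:8.  nvme_pr_preempt(..., type = PR_WRITE_EXCLUSIVE, abort = true)
 * therefore sends
 *
 *	cdw10 = (1 << 8) | 2 = 0x102	(type 1, preempt-and-abort)
 *
 * which is why the (abort ? 2 : 1) term must be parenthesised: without the
 * parentheses the ?: operator would swallow the whole expression and the
 * reservation type would be lost.
 */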

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw10[1] = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
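
/*
 * Worked example (illustrative): CAP.MPSMIN encodes the controller's
 * minimum memory page size as 2^(12 + MPSMIN), so MPSMIN == 0 means 4KiB
 * and the check above always passes on a 4KiB host page; with
 * page_shift == 12 the CC.MPS field written is (12 - 12) == 0, again
 * meaning 4KiB.  A controller reporting MPSMIN == 1 (8KiB minimum) would
 * be rejected here with -ENODEV until this path learns about larger host
 * pages.  Similarly, nvme_wait_ready() above converts CAP.TO (reported in
 * 500ms units) into jiffies: CAP.TO == 3 gives a (3 + 1) * HZ / 2, i.e.
 * 2 second, ready timeout.
 */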

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

static void nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's total latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */

	unsigned apste;
	struct nvme_feat_auto_pst *table;
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return;

	if (ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		apste = 0;
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
			u64 total_latency_us, transition_ms;

			if (target)
				table->entries[state] = target;

			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

			total_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].entry_lat) +
				le32_to_cpu(ctrl->psd[state].exit_lat);
			if (total_latency_us > ctrl->ps_max_latency_us)
				continue;

			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));
		}

		apste = 1;
	}

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
}
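
/*
 * Worked example (illustrative): suppose the deepest reported state is
 * state 4, non-operational, with entry_lat == 1000us and exit_lat == 1000us,
 * and ps_max_latency_us is the 25000us default.  Then
 *
 *	total_latency_us = 2000
 *	transition_ms    = (2000 + 19) / 20 = 100	(i.e. 50 * 2000us)
 *	target           = (4 << 3) | (100 << 8)
 *
 * and every shallower state's table entry is set to that target, telling
 * the controller "after 100ms idle in this state, drop to state 4".  A
 * non-operational state whose total latency exceeded 25000us would simply
 * be skipped.
 */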

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;

	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}

struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
	/*
	 * Seen on a Samsung "SM951 NVMe SAMSUNG 256GB": using APST causes
	 * the controller to go out to lunch.  It dies when the watchdog
	 * timer reads CSTS and gets 0xffffffff.
	 */
	{
		.vid = 0x144d,
		.fr = "BXW75D0Q",
		.quirks = NVME_QUIRK_NO_APST,
	},
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;
	u8 prev_apsta;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	if (!ctrl->identified) {
		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */

		int i;

		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);

	ctrl->npss = id->npss;
	prev_apsta = ctrl->apsta;
	ctrl->apsta = (ctrl->quirks & NVME_QUIRK_NO_APST) ? 0 : id->apsta;
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->is_fabrics) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid))
			ret = -EINVAL;

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->dev,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
		}
	} else {
		ctrl->cntlid = le16_to_cpu(id->cntlid);
	}

	kfree(id);

	if (ctrl->apsta && !prev_apsta)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apsta && prev_apsta)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

	nvme_configure_apst(ctrl);

	ctrl->identified = true;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return ctrl->ops->reset_ctrl(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = ctrl->ops->reset_ctrl(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
		return sprintf(buf, "eui.%16phN\n", ns->uuid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (ctrl->serial[serial_len - 1] == ' ')
		serial_len--;
	while (ctrl->model[model_len - 1] == ' ')
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	if (a == &dev_attr_uuid.attr) {
		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

#define nvme_show_str_function(field)					\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)					\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%d\n", ctrl->field);			\
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		ctrl->ops->delete_ctrl(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_RECONNECTING]= "reconnecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			ctrl->ops->get_subsysnqn(ctrl));
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	NULL
};

#define CHECK_ATTR(ctrl, a, name)		\
	if ((a) == &dev_attr_##name.attr &&	\
	    !(ctrl)->ops->get_##name)		\
		return 0

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr) {
		if (!ctrl->ops->delete_ctrl)
			return 0;
	}

	CHECK_ATTR(ctrl, a, subsysnqn);
	CHECK_ATTR(ctrl, a, address);

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}

static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid) {
			kref_get(&ns->kref);
			ret = ns;
			break;
		}
		if (ns->ns_id > nsid)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	if (nvme_revalidate_ns(ns, &id))
		goto out_free_queue;

	if (nvme_nvm_ns_supported(ns, id) &&
				nvme_nvm_register(ns, disk_name, node)) {
		dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__);
		goto out_free_id;
	}

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_id;

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

	__nvme_revalidate_disk(disk, id);

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	kref_get(&ctrl->kref);

	kfree(id);

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	if (ns->ndev && nvme_nvm_register_sysfs(ns))
		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
			ns->disk->disk_name);
	return;
 out_free_id:
	kfree(id);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
 out_free_ns:
	kfree(ns);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		if (ns->ndev)
			nvme_nvm_unregister_sysfs(ns);
		del_gendisk(ns->disk);
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (ns->disk && revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}

static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}
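
/*
 * Worked example (illustrative): a controller reporting nn == 1536 active
 * namespace IDs is scanned in num_lists == DIV_ROUND_UP(1536, 1024) == 2
 * passes.  The first Identify (active namespace list, starting after
 * NSID 0) returns up to 1024 NSIDs in the 4KiB buffer; the second pass
 * starts after the last NSID seen and covers the remaining 512.  A zero
 * entry terminates the list early, and any locally known namespace whose
 * NSID was skipped over (or is larger than the last one reported) is
 * removed.
 */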
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
        unsigned i;

        for (i = 1; i <= nn; i++)
                nvme_validate_ns(ctrl, i);

        nvme_remove_invalid_namespaces(ctrl, nn);
}

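/*
 * scan_work handler: read the namespace count from Identify Controller and
 * either walk the active namespace ID lists (NVMe 1.1+ controllers without
 * the NVME_QUIRK_IDENTIFY_CNS quirk) or fall back to the sequential scan
 * above, then re-sort the controller's namespace list.
 */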
static void nvme_scan_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl =
                container_of(work, struct nvme_ctrl, scan_work);
        struct nvme_id_ctrl *id;
        unsigned nn;

        if (ctrl->state != NVME_CTRL_LIVE)
                return;

        if (nvme_identify_ctrl(ctrl, &id))
                return;

        nn = le32_to_cpu(id->nn);
        if (ctrl->vs >= NVME_VS(1, 1, 0) &&
            !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
                if (!nvme_scan_ns_list(ctrl, nn))
                        goto done;
        }
        nvme_scan_ns_sequential(ctrl, nn);
done:
        mutex_lock(&ctrl->namespaces_mutex);
        list_sort(NULL, &ctrl->namespaces, ns_cmp);
        mutex_unlock(&ctrl->namespaces_mutex);
        kfree(id);
}

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
        /*
         * Do not queue new scan work when a controller is reset during
         * removal.
         */
        if (ctrl->state == NVME_CTRL_LIVE)
                schedule_work(&ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
        struct nvme_ns *ns, *next;

        /*
         * The dead state indicates that the controller was not gracefully
         * disconnected. In that case, we won't be able to flush any data while
         * removing the namespaces' disks; fail all the queues now to avoid
         * potentially having to clean up the failed sync later.
         */
        if (ctrl->state == NVME_CTRL_DEAD)
                nvme_kill_queues(ctrl);

        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static void nvme_async_event_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl =
                container_of(work, struct nvme_ctrl, async_event_work);

        spin_lock_irq(&ctrl->lock);
        while (ctrl->event_limit > 0) {
                int aer_idx = --ctrl->event_limit;

                spin_unlock_irq(&ctrl->lock);
                ctrl->ops->submit_async_event(ctrl, aer_idx);
                spin_lock_irq(&ctrl->lock);
        }
        spin_unlock_irq(&ctrl->lock);
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                union nvme_result *res)
{
        u32 result = le32_to_cpu(res->u32);
        bool done = true;

        switch (le16_to_cpu(status) >> 1) {
        case NVME_SC_SUCCESS:
                done = false;
                /*FALLTHRU*/
        case NVME_SC_ABORT_REQ:
                ++ctrl->event_limit;
                schedule_work(&ctrl->async_event_work);
                break;
        default:
                break;
        }

        if (done)
                return;

        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
                dev_info(ctrl->device, "rescanning\n");
                nvme_queue_scan(ctrl);
                break;
        default:
                dev_warn(ctrl->device, "async event result %08x\n", result);
        }
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
        ctrl->event_limit = NVME_NR_AERS;
        schedule_work(&ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);

static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
        int instance, error;

        do {
                if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
                        return -ENODEV;

                spin_lock(&dev_list_lock);
                error = ida_get_new(&nvme_instance_ida, &instance);
                spin_unlock(&dev_list_lock);
        } while (error == -EAGAIN);

        if (error)
                return -ENODEV;

        ctrl->instance = instance;
        return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
        spin_lock(&dev_list_lock);
        ida_remove(&nvme_instance_ida, ctrl->instance);
        spin_unlock(&dev_list_lock);
}

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
        flush_work(&ctrl->async_event_work);
        flush_work(&ctrl->scan_work);
        nvme_remove_namespaces(ctrl);

        device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

        spin_lock(&dev_list_lock);
        list_del(&ctrl->node);
        spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

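/*
 * Controller teardown is reference counted: the final nvme_put_ctrl() drops
 * ctrl->kref, and nvme_free_ctrl() then releases the character device
 * instance number and the namespace IDA before handing the remaining
 * cleanup back to the transport through ctrl->ops->free_ctrl().
 */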
static void nvme_free_ctrl(struct kref *kref)
{
        struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

        put_device(ctrl->device);
        nvme_release_instance(ctrl);
        ida_destroy(&ctrl->ns_ida);

        ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
        kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);

/*
 * Initialize an NVMe controller structure. This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
                const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
        int ret;

        ctrl->state = NVME_CTRL_NEW;
        spin_lock_init(&ctrl->lock);
        INIT_LIST_HEAD(&ctrl->namespaces);
        mutex_init(&ctrl->namespaces_mutex);
        kref_init(&ctrl->kref);
        ctrl->dev = dev;
        ctrl->ops = ops;
        ctrl->quirks = quirks;
        INIT_WORK(&ctrl->scan_work, nvme_scan_work);
        INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);

        ret = nvme_set_instance(ctrl);
        if (ret)
                goto out;

        ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
                                MKDEV(nvme_char_major, ctrl->instance),
                                ctrl, nvme_dev_attr_groups,
                                "nvme%d", ctrl->instance);
        if (IS_ERR(ctrl->device)) {
                ret = PTR_ERR(ctrl->device);
                goto out_release_instance;
        }
        get_device(ctrl->device);
        ida_init(&ctrl->ns_ida);

        spin_lock(&dev_list_lock);
        list_add_tail(&ctrl->node, &nvme_ctrl_list);
        spin_unlock(&dev_list_lock);

        /*
         * Initialize latency tolerance controls. The sysfs files won't
         * be visible to userspace unless the device actually supports APST.
         */
        ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
        dev_pm_qos_update_user_latency_tolerance(ctrl->device,
                min(default_ps_max_latency_us, (unsigned long)S32_MAX));

        return 0;
out_release_instance:
        nvme_release_instance(ctrl);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
        struct nvme_ns *ns;

        mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                /*
                 * Revalidating a dead namespace sets capacity to 0. This will
                 * end buffered writers dirtying pages that can't be synced.
                 */
                if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
                        continue;
                revalidate_disk(ns->disk);
                blk_set_queue_dying(ns->queue);
                blk_mq_abort_requeue_list(ns->queue);
                blk_mq_start_stopped_hw_queues(ns->queue, true);
        }
        mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

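/*
 * nvme_stop_queues() and nvme_start_queues() quiesce and restart every
 * namespace request queue. Unlike nvme_kill_queues() above they leave the
 * queues usable, so a transport driver can pause I/O (for example across a
 * controller reset) and let it resume afterwards.
 */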
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
        struct nvme_ns *ns;

        mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list)
                blk_mq_quiesce_queue(ns->queue);
        mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
        struct nvme_ns *ns;

        mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                blk_mq_start_stopped_hw_queues(ns->queue, true);
                blk_mq_kick_requeue_list(ns->queue);
        }
        mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

int __init nvme_core_init(void)
{
        int result;

        result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
                                                        &nvme_dev_fops);
        if (result < 0)
                return result;
        else if (result > 0)
                nvme_char_major = result;

        nvme_class = class_create(THIS_MODULE, "nvme");
        if (IS_ERR(nvme_class)) {
                result = PTR_ERR(nvme_class);
                goto unregister_chrdev;
        }

        return 0;

unregister_chrdev:
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
        return result;
}

void nvme_core_exit(void)
{
        class_destroy(nvme_class);
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);