// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	4096
#define NVME_MAX_SEGS	127

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
		"Use SGLs when average request segment size is larger or equal to "
		"this size. Use 0 to disable SGLs.");

static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_int,
};

static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");

static int queue_count_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops queue_count_ops = {
	.set = queue_count_set,
	.get = param_get_int,
};

static int write_queues;
module_param_cb(write_queues, &queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static int poll_queues = 0;
module_param_cb(poll_queues, &queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	int q_depth;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;

	return param_set_int(val, kp);
}

static int queue_count_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret)
		return ret;
	if (n > num_possible_cpus())
		n = num_possible_cpus();

	return param_set_int(val, kp);
}

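/*
 * Doorbell (and shadow doorbell buffer) layout: each queue owns a pair of
 * 32-bit slots, the SQ tail doorbell first and the CQ head doorbell second,
 * spaced by the controller's doorbell stride.  With a stride of 1, for
 * example, queue 3 uses slot 6 for its SQ doorbell and slot 7 for its CQ
 * doorbell.
 */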
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	struct nvme_command *sq_cmds;
	 /* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 last_cq_head;
	u16 qid;
	u8 cq_phase;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
	struct completion delete_done;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	dma_addr_t first_dma;
	struct scatterlist meta_sg;	/* metadata requires single contiguous buffer */
	struct scatterlist *sg;
	struct scatterlist inline_sg[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}

static unsigned int max_io_queues(void)
{
	return num_possible_cpus() + write_queues + poll_queues;
}

static unsigned int max_queue_count(void)
{
	/* IO queues + admin queue */
	return 1 + max_io_queues();
}

static inline unsigned int nvme_dbbuf_size(u32 stride)
{
	return (max_queue_count() * 8 * stride);
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

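/*
 * Returns true when the new doorbell value has moved past the event index
 * the controller advertised, i.e. event_idx lies in (old, new_idx] modulo
 * 2^16.  For example old=10, new_idx=12, event_idx=11 -> true (an MMIO
 * doorbell write is needed); event_idx=9 -> false (the controller will pick
 * up the shadow doorbell update on its own).
 */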
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(unsigned int num_seg)
{
	return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}

static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg, bool use_sgl)
{
	size_t alloc_size;

	if (use_sgl)
		alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
	else
		alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);

	return alloc_size + sizeof(struct scatterlist) * nseg;
}

static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
{
	unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
				    NVME_INT_BYTES(dev), NVME_INT_PAGES,
				    use_sgl);

	return sizeof(struct nvme_iod) + alloc_size;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;

	nvme_req(req)->ctrl = &dev->ctrl;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) don't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

	return 0;
}

/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 * @write_sq: whether to write to the SQ doorbell
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			    bool write_sq)
{
	spin_lock(&nvmeq->sq_lock);
	memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

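/*
 * Decide between PRPs and SGLs for the data transfer.  PRP entries are 8
 * bytes each and must describe page-aligned, page-sized chunks, while SGL
 * descriptors are 16 bytes but may describe arbitrarily sized segments.
 * SGLs are therefore only used on I/O queues, when the controller supports
 * them, and when the average segment is at least sgl_threshold bytes.
 */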
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	if (nseg == 0)
		return false;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
	int nseg = blk_rq_nr_phys_segments(rq);
	unsigned int size = blk_rq_payload_bytes(rq);

	iod->use_sgl = nvme_pci_use_sgls(dev, rq);

	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
		iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
		if (!iod->sg)
			return BLK_STS_RESOURCE;
	} else {
		iod->sg = iod->inline_sg;
	}

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;

	return BLK_STS_OK;
}

static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;

	int i;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			dma_addr);

	for (i = 0; i < iod->npages; i++) {
		void *addr = nvme_pci_iod_list(req)[i];

		if (iod->use_sgl) {
			struct nvme_sgl_desc *sg_list = addr;

			next_dma_addr =
			    le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
		} else {
			__le64 *prp_list = addr;

			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
		}

		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
		dma_addr = next_dma_addr;
	}

	if (iod->sg != iod->inline_sg)
		mempool_free(iod->sg, dev->iod_mempool);
}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

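/*
 * Build the PRP entries for a request: PRP1 points at the first byte of the
 * transfer and PRP2 is either the second page (for transfers spanning at
 * most two pages) or the address of a PRP list.  Each list page holds
 * page_size / 8 entries, and the last entry of a full list is reused as a
 * pointer to the next list page when more entries are needed.
 */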
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	void **list = nvme_pci_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return BLK_STS_RESOURCE;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);

	return BLK_STS_OK;

 bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
			"Invalid SGL for payload:%d nents:%d\n",
			blk_rq_payload_bytes(req), iod->nents);
	return BLK_STS_IOERR;
}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

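/*
 * Build an SGL for the request.  A single mapped segment is described
 * inline in the command; otherwise the command carries a segment descriptor
 * pointing at a page of data descriptors.  When a page fills up, its last
 * descriptor is turned into a segment descriptor pointing at the next page
 * (the data descriptor it replaces is moved to the start of that page).
 */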
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd, int entries)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				return BLK_STS_RESOURCE;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}

static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct request_queue *q = req->q;
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
	blk_status_t ret = BLK_STS_IOERR;
	int nr_mapped;

	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(q, req, iod->sg);
	if (!iod->nents)
		goto out;

	ret = BLK_STS_RESOURCE;

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
					  dma_dir);
	else
		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
					     dma_dir, DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out;

	if (iod->use_sgl)
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);

	if (ret != BLK_STS_OK)
		goto out_unmap;

	ret = BLK_STS_IOERR;
	if (blk_integrity_rq(req)) {
		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
			goto out_unmap;

		sg_init_table(&iod->meta_sg, 1);
		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
			goto out_unmap;

		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
			goto out_unmap;

		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
	}

	return BLK_STS_OK;

out_unmap:
	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
	return ret;
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (iod->nents) {
		/* P2PDMA requests do not need to be unmapped */
		if (!is_pci_p2pdma_page(sg_page(iod->sg)))
			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);

		if (blk_integrity_rq(req))
			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
	}

	nvme_cleanup_cmd(req);
	nvme_free_iod(dev, req);
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_command cmnd;
	blk_status_t ret;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	ret = nvme_setup_cmd(ns, req, &cmnd);
	if (ret)
		return ret;

	ret = nvme_init_iod(req, dev);
	if (ret)
		goto out_free_cmd;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &cmnd);
		if (ret)
			goto out_cleanup_iod;
	}

	blk_mq_start_request(req);
	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
	return BLK_STS_OK;
out_cleanup_iod:
	nvme_free_iod(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static void nvme_pci_complete_rq(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_unmap_data(iod->nvmeq->dev, req);
	nvme_complete_rq(req);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
			nvmeq->cq_phase;
}

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
	struct request *req;

	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			cqe->command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvmeq->qid == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	nvme_end_request(req, cqe->status, cqe->result);
}

static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
{
	while (start != end) {
		nvme_handle_cqe(nvmeq, start);
		if (++start == nvmeq->q_depth)
			start = 0;
	}
}

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	if (nvmeq->cq_head == nvmeq->q_depth - 1) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase = !nvmeq->cq_phase;
	} else {
		nvmeq->cq_head++;
	}
}

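/*
 * Consume all completion entries whose phase bit matches cq_phase, advancing
 * cq_head (the expected phase flips each time the queue wraps) and ringing
 * the CQ head doorbell once at the end.  The consumed range [*start, *end)
 * is completed later via nvme_complete_cqes(); the return value is how many
 * entries matched the given tag (or all of them when tag == -1U).
 */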
static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
				  u16 *end, unsigned int tag)
{
	int found = 0;

	*start = nvmeq->cq_head;
	while (nvme_cqe_pending(nvmeq)) {
		if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
			found++;
		nvme_update_cq_head(nvmeq);
	}
	*end = nvmeq->cq_head;

	if (*start != *end)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	irqreturn_t ret = IRQ_NONE;
	u16 start, end;

	/*
	 * The rmb/wmb pair ensures we see all updates from a previous run of
	 * the irq handler, even if that was on another CPU.
	 */
	rmb();
	if (nvmeq->cq_head != nvmeq->last_cq_head)
		ret = IRQ_HANDLED;
	nvme_process_cq(nvmeq, &start, &end, -1);
	nvmeq->last_cq_head = nvmeq->cq_head;
	wmb();

	if (start != end) {
		nvme_complete_cqes(nvmeq, start, end);
		return IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	if (nvme_cqe_pending(nvmeq))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

/*
 * Poll for completions on any queue, including those not dedicated to polling.
 * Can be called from any context.
 */
static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	u16 start, end;
	int found;

	/*
	 * For a poll queue we need to protect against the polling thread
	 * using the CQ lock.  For normal interrupt driven queues we have
	 * to disable the interrupt to avoid racing with it.
	 */
	if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
		spin_lock(&nvmeq->cq_poll_lock);
		found = nvme_process_cq(nvmeq, &start, &end, tag);
		spin_unlock(&nvmeq->cq_poll_lock);
	} else {
		disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
		found = nvme_process_cq(nvmeq, &start, &end, tag);
		enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	}

	nvme_complete_cqes(nvmeq, start, end);
	return found;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	u16 start, end;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_process_cq(nvmeq, &start, &end, -1);
	spin_unlock(&nvmeq->cq_poll_lock);

	nvme_complete_cqes(nvmeq, start, end);
	return found;
}

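/*
 * AEN commands are submitted outside the blk-mq tag space using the fixed
 * command id NVME_AQ_BLK_MQ_DEPTH, which is what nvme_handle_cqe() keys on
 * to route their completions to nvme_complete_async_event().
 */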
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	nvme_submit_cmd(nvmeq, &c, true);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq, s16 vector)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG;

	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
		flags |= NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq)
{
	struct nvme_ctrl *ctrl = &dev->ctrl;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG;

	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set. Since URGENT priority is zeroes, it makes all queues
	 * URGENT.
	 */
	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
		flags |= NVME_SQ_PRIO_MEDIUM;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void abort_endio(struct request *req, blk_status_t error)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{

	/* If true, indicates loss of adapter communication, possibly by a
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (dev->ctrl.state) {
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;
	}

	/* We shouldn't reset unless the controller is on fatal error state
	 * _or_ if we lost the communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	return true;
}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);
}

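/*
 * Timeout handling escalates in stages: first poll the CQ in case the
 * completion simply raced with a lost interrupt, then send a single Abort
 * for the command, and finally disable the controller and schedule a full
 * reset if the abort was already tried or the admin queue itself hung.
 */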
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd;
	u32 csts = readl(dev->bar + NVME_REG_CSTS);

	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	mb();
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;

	/*
	 * Reset immediately if the controller is failed
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);
		return BLK_EH_DONE;
	}

	/*
	 * Did we miss an interrupt?
	 */
	if (nvme_poll_irqdisable(nvmeq, req->tag)) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, completion polled\n",
			 req->tag, nvmeq->qid);
		return BLK_EH_DONE;
	}

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_DONE.
	 */
	switch (dev->ctrl.state) {
	case NVME_CTRL_CONNECTING:
	case NVME_CTRL_RESETTING:
		dev_warn_ratelimited(dev->ctrl.device,
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_DONE;
	default:
		break;
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);

		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_DONE;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = req->tag;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		"I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->timeout = ADMIN_TIMEOUT;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (!nvmeq->sq_cmds)
		return;

	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
				nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
	} else {
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	}
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
		nvme_free_queue(&dev->queues[i]);
	}
}

/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq: queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
		return 1;

	/* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
	mb();

	nvmeq->dev->online_queues--;
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
		pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
	return 0;
}

static void nvme_suspend_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(&dev->queues[i]);
}

static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
	struct nvme_queue *nvmeq = &dev->queues[0];

	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);

	nvme_poll_irqdisable(nvmeq, -1);
}

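/*
 * When submission queues are placed in the Controller Memory Buffer they all
 * have to fit in it.  If they don't, shrink the queue depth so that
 * nr_io_queues * roundup(q_depth * entry_size, page_size) fits in cmb_size,
 * and give up (so the caller falls back to host memory at the original
 * depth) if that would drop the depth below 64.
 */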
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  dev->ctrl.page_size);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
		int qid, int depth)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
		nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
						nvmeq->sq_cmds);
		if (nvmeq->sq_dma_addr) {
			set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
			return 0;
		}
	}

	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
				&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		return -ENOMEM;
	return 0;
}

static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
{
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (dev->ctrl.queue_count > qid)
		return 0;

	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
		goto free_cqdma;

	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->sq_lock);
	spin_lock_init(&nvmeq->cq_poll_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->ctrl.queue_count++;

	return 0;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	return -ENOMEM;
}

static int queue_request_irq(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->last_sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;
	wmb(); /* ensure the first interrupt sees the initialization */
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;
	u16 vector = 0;

	clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

	/*
	 * A queue's vector matches the queue identifier unless the controller
	 * has only one vector available.
	 */
	if (!polled)
		vector = dev->num_vecs == 1 ? 0 : qid;
	else
		set_bit(NVMEQ_POLLED, &nvmeq->flags);

	result = adapter_alloc_cq(dev, qid, nvmeq, vector);
	if (result)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		return result;
	else if (result)
		goto release_cq;

	nvmeq->cq_vector = vector;
	nvme_init_queue(nvmeq, qid);

	if (!polled) {
		result = queue_request_irq(nvmeq);
		if (result < 0)
			goto release_sq;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	return result;

release_sq:
	dev->online_queues--;
	adapter_delete_sq(dev, qid);
release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}

static const struct blk_mq_ops nvme_mq_admin_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_pci_complete_rq,
	.init_hctx	= nvme_admin_init_hctx,
	.exit_hctx	= nvme_admin_exit_hctx,
	.init_request	= nvme_init_request,
	.timeout	= nvme_timeout,
};

static const struct blk_mq_ops nvme_mq_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_pci_complete_rq,
	.commit_rqs	= nvme_commit_rqs,
	.init_hctx	= nvme_init_hctx,
	.init_request	= nvme_init_request,
	.map_queues	= nvme_pci_map_queues,
	.timeout	= nvme_timeout,
	.poll		= nvme_poll,
};

static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		/*
		 * If the controller was reset during removal, it's possible
		 * user requests may be waiting on a stopped queue. Start the
		 * queue to flush these to completion.
		 */
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);
		blk_cleanup_queue(dev->ctrl.admin_q);
		blk_mq_free_tag_set(&dev->admin_tagset);
	}
}

static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
	if (!dev->ctrl.admin_q) {
		dev->admin_tagset.ops = &nvme_mq_admin_ops;
		dev->admin_tagset.nr_hw_queues = 1;

		dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
		dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
		dev->admin_tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
			return -ENOMEM;
		dev->ctrl.admin_tagset = &dev->admin_tagset;

		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
		if (IS_ERR(dev->ctrl.admin_q)) {
			blk_mq_free_tag_set(&dev->admin_tagset);
			return -ENOMEM;
		}
		if (!blk_get_queue(dev->ctrl.admin_q)) {
			nvme_dev_remove_admin(dev);
			dev->ctrl.admin_q = NULL;
			return -ENODEV;
		}
	} else
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);

	return 0;
}

static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (size <= dev->bar_mapped_size)
		return 0;
	if (size > pci_resource_len(pdev, 0))
		return -ENOMEM;
	if (dev->bar)
		iounmap(dev->bar);
	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
	if (!dev->bar) {
		dev->bar_mapped_size = 0;
		return -ENOMEM;
	}
	dev->bar_mapped_size = size;
	dev->dbs = dev->bar + NVME_REG_DBS;

	return 0;
}

static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
	if (result < 0)
		return result;

	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;

	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

	result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
	if (result < 0)
		return result;

	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
	if (result)
		return result;

	nvmeq = &dev->queues[0];
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

	result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
	if (result)
		return result;

	nvmeq->cq_vector = 0;
	nvme_init_queue(nvmeq, 0);
	result = queue_request_irq(nvmeq);
	if (result) {
		dev->online_queues--;
		return result;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	return result;
}

static int nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned i, max, rw_queues;
	int ret = 0;

	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
		if (nvme_alloc_queue(dev, i, dev->q_depth)) {
			ret = -ENOMEM;
			break;
		}
	}

	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
	if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
		rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
				dev->io_queues[HCTX_TYPE_READ];
	} else {
		rw_queues = max;
	}

	for (i = dev->online_queues; i <= max; i++) {
		bool polled = i > rw_queues;

		ret = nvme_create_queue(&dev->queues[i], i, polled);
		if (ret)
			break;
	}

	/*
	 * Ignore failing Create SQ/CQ commands, we can continue with less
	 * than the desired amount of queues, and even a controller without
	 * I/O queues can still be used to issue admin commands.  This might
	 * be useful to upgrade a buggy firmware for example.
	 */
	return ret >= 0 ? 0 : ret;
}

static ssize_t nvme_cmb_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));

	return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
		       ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);

static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{
	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

	return 1ULL << (12 + 4 * szu);
}

static u32 nvme_cmb_size(struct nvme_dev *dev)
{
	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
}

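/*
 * Map the Controller Memory Buffer: its location (BAR + offset) and size are
 * advertised in the CMBLOC and CMBSZ registers, with sizes expressed in
 * units of 4KiB << (4 * SZU).  The region is registered as a PCI
 * peer-to-peer DMA resource so that submission queues (and P2P I/O buffers)
 * can be placed in it.
 */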
static void nvme_map_cmb(struct nvme_dev *dev)
{
	u64 size, offset;
	resource_size_t bar_size;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int bar;

	if (dev->cmb_size)
		return;

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!dev->cmbsz)
		return;
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
	bar = NVME_CMB_BIR(dev->cmbloc);
	bar_size = pci_resource_len(pdev, bar);

	if (offset > bar_size)
		return;

	/*
	 * Controllers may support a CMB size larger than their BAR,
	 * for example, due to being behind a bridge. Reduce the CMB to
	 * the reported size of the BAR
	 */
	if (size > bar_size - offset)
		size = bar_size - offset;

	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
		dev_warn(dev->ctrl.device,
			 "failed to register the CMB\n");
		return;
	}

	dev->cmb_size = size;
	dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);

	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
		pci_p2pmem_publish(pdev, true);

	if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
				    &dev_attr_cmb.attr, NULL))
		dev_warn(dev->ctrl.device,
			 "failed to add sysfs attribute for CMB\n");
}

static inline void nvme_release_cmb(struct nvme_dev *dev)
{
	if (dev->cmb_size) {
		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
					     &dev_attr_cmb.attr, NULL);
		dev->cmb_size = 0;
	}
}

static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
	u64 dma_addr = dev->host_mem_descs_dma;
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
	c.features.dword11 = cpu_to_le32(bits);
	c.features.dword12 = cpu_to_le32(dev->host_mem_size >>
					 ilog2(dev->ctrl.page_size));
	c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
	c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
	c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);

	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
	if (ret) {
		dev_warn(dev->ctrl.device,
			 "failed to set host mem (err %d, flags %#x).\n",
			 ret, bits);
	}
	return ret;
}

static void nvme_free_host_mem(struct nvme_dev *dev)
{
	int i;

	for (i = 0; i < dev->nr_host_mem_descs; i++) {
		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;

		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
			       le64_to_cpu(desc->addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(dev->host_mem_desc_bufs);
	dev->host_mem_desc_bufs = NULL;
	dma_free_coherent(dev->dev,
			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
			dev->host_mem_descs, dev->host_mem_descs_dma);
	dev->host_mem_descs = NULL;
	dev->nr_host_mem_descs = 0;
}

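/*
 * Try to populate the host memory buffer out of chunk_size pieces: each DMA
 * allocation becomes one descriptor (address + size in controller pages) in
 * a table the controller reads, capped at HMMAXD entries when the controller
 * advertises a limit.  Partial success is acceptable; the caller decides
 * whether enough memory was gathered.
 */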
static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
		u32 chunk_size)
{
	struct nvme_host_mem_buf_desc *descs;
	u32 max_entries, len;
	dma_addr_t descs_dma;
	int i = 0;
	void **bufs;
	u64 size, tmp;

	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;

	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
		max_entries = dev->ctrl.hmmaxd;

	descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
				   &descs_dma, GFP_KERNEL);
	if (!descs)
		goto out;

	bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		goto out_free_descs;

	for (size = 0; size < preferred && i < max_entries; size += len) {
		dma_addr_t dma_addr;

		len = min_t(u64, chunk_size, preferred - size);
		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
		if (!bufs[i])
			break;

		descs[i].addr = cpu_to_le64(dma_addr);
		descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
		i++;
	}

	if (!size)
		goto out_free_bufs;

	dev->nr_host_mem_descs = i;
	dev->host_mem_size = size;
	dev->host_mem_descs = descs;
	dev->host_mem_descs_dma = descs_dma;
	dev->host_mem_desc_bufs = bufs;
	return 0;

out_free_bufs:
	while (--i >= 0) {
		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;

		dma_free_attrs(dev->dev, size, bufs[i],
			       le64_to_cpu(descs[i].addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(bufs);
out_free_descs:
	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
			descs_dma);
out:
	dev->host_mem_descs = NULL;
	return -ENOMEM;
}

static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
	u32 chunk_size;

	/* start big and work our way down */
	for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
	     chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
	     chunk_size /= 2) {
		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
			if (!min || dev->host_mem_size >= min)
				return 0;
			nvme_free_host_mem(dev);
		}
	}

	return -ENOMEM;
}

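/*
 * Host Memory Buffer sizing policy: aim for the controller's preferred size
 * (HMPRE, in 4KiB units) capped by the max_host_mem_size_mb module
 * parameter.  If even the controller's minimum (HMMIN) exceeds that cap,
 * skip the HMB entirely; an existing buffer that still satisfies the minimum
 * is handed back to the controller with the "return" bit instead of being
 * reallocated.
 */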
2003 */
2004 if (dev->host_mem_descs) {
2005 if (dev->host_mem_size >= min)
2006 enable_bits |= NVME_HOST_MEM_RETURN;
2007 else
2008 nvme_free_host_mem(dev);
2009 }
2010
2011 if (!dev->host_mem_descs) {
2012 if (nvme_alloc_host_mem(dev, min, preferred)) {
2013 dev_warn(dev->ctrl.device,
2014 "failed to allocate host memory buffer.\n");
2015 return 0; /* controller must work without HMB */
2016 }
2017
2018 dev_info(dev->ctrl.device,
2019 "allocated %lld MiB host memory buffer.\n",
2020 dev->host_mem_size >> ilog2(SZ_1M));
2021 }
2022
2023 ret = nvme_set_host_mem(dev, enable_bits);
2024 if (ret)
2025 nvme_free_host_mem(dev);
2026 return ret;
2027 }
2028
2029 /*
2030 * nrirqs is the number of interrupts available for write and read
2031 * queues. The core already reserved an interrupt for the admin queue.
2032 */
2033 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
2034 {
2035 struct nvme_dev *dev = affd->priv;
2036 unsigned int nr_read_queues;
2037
2038 /*
2039 * If there is no interrupt available for queues, ensure that
2040 * the default queue is set to 1. The affinity set size is
2041 * also set to one, but the irq core ignores it for this case.
2042 *
2043 * If only one interrupt is available or 'write_queues' == 0, combine
2044 * write and read queues.
2045 *
2046 * If 'write_queues' > 0, ensure it leaves room for at least one read
2047 * queue.
2048 */
2049 if (!nrirqs) {
2050 nrirqs = 1;
2051 nr_read_queues = 0;
2052 } else if (nrirqs == 1 || !write_queues) {
2053 nr_read_queues = 0;
2054 } else if (write_queues >= nrirqs) {
2055 nr_read_queues = 1;
2056 } else {
2057 nr_read_queues = nrirqs - write_queues;
2058 }
2059
2060 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2061 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2062 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
2063 affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
2064 affd->nr_sets = nr_read_queues ? 2 : 1;
2065 }
2066
2067 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2068 {
2069 struct pci_dev *pdev = to_pci_dev(dev->dev);
2070 struct irq_affinity affd = {
2071 .pre_vectors = 1,
2072 .calc_sets = nvme_calc_irq_sets,
2073 .priv = dev,
2074 };
2075 unsigned int irq_queues, this_p_queues;
2076
2077 /*
2078 * Poll queues don't need interrupts, but we need at least one IO
2079 * queue left over for non-polled IO.
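 *
 * Worked example (illustrative numbers, not taken from the code): with
 * nr_io_queues == 8 and poll_queues == 2, this_p_queues stays 2 and
 * irq_queues becomes 8 - 2 + 1 == 7, i.e. one vector for the admin
 * queue plus six for interrupt-driven I/O queues. If poll_queues were
 * >= nr_io_queues, one queue would be held back for interrupt-driven
 * I/O and only a single vector would be requested.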
2080 */ 2081 this_p_queues = poll_queues; 2082 if (this_p_queues >= nr_io_queues) { 2083 this_p_queues = nr_io_queues - 1; 2084 irq_queues = 1; 2085 } else { 2086 irq_queues = nr_io_queues - this_p_queues + 1; 2087 } 2088 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; 2089 2090 /* Initialize for the single interrupt case */ 2091 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2092 dev->io_queues[HCTX_TYPE_READ] = 0; 2093 2094 return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, 2095 PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); 2096 } 2097 2098 static void nvme_disable_io_queues(struct nvme_dev *dev) 2099 { 2100 if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 2101 __nvme_disable_io_queues(dev, nvme_admin_delete_cq); 2102 } 2103 2104 static int nvme_setup_io_queues(struct nvme_dev *dev) 2105 { 2106 struct nvme_queue *adminq = &dev->queues[0]; 2107 struct pci_dev *pdev = to_pci_dev(dev->dev); 2108 int result, nr_io_queues; 2109 unsigned long size; 2110 2111 nr_io_queues = max_io_queues(); 2112 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 2113 if (result < 0) 2114 return result; 2115 2116 if (nr_io_queues == 0) 2117 return 0; 2118 2119 clear_bit(NVMEQ_ENABLED, &adminq->flags); 2120 2121 if (dev->cmb_use_sqes) { 2122 result = nvme_cmb_qdepth(dev, nr_io_queues, 2123 sizeof(struct nvme_command)); 2124 if (result > 0) 2125 dev->q_depth = result; 2126 else 2127 dev->cmb_use_sqes = false; 2128 } 2129 2130 do { 2131 size = db_bar_size(dev, nr_io_queues); 2132 result = nvme_remap_bar(dev, size); 2133 if (!result) 2134 break; 2135 if (!--nr_io_queues) 2136 return -ENOMEM; 2137 } while (1); 2138 adminq->q_db = dev->dbs; 2139 2140 retry: 2141 /* Deregister the admin queue's interrupt */ 2142 pci_free_irq(pdev, 0, adminq); 2143 2144 /* 2145 * If we enable msix early due to not intx, disable it again before 2146 * setting up the full range we need. 2147 */ 2148 pci_free_irq_vectors(pdev); 2149 2150 result = nvme_setup_irqs(dev, nr_io_queues); 2151 if (result <= 0) 2152 return -EIO; 2153 2154 dev->num_vecs = result; 2155 result = max(result - 1, 1); 2156 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 2157 2158 /* 2159 * Should investigate if there's a performance win from allocating 2160 * more queues than interrupt vectors; it might allow the submission 2161 * path to scale better, even if the receive path is limited by the 2162 * number of interrupts. 
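 *
 * As the code stands, the interrupt-driven queue count is simply capped
 * by the vectors actually granted: dev->num_vecs minus the admin vector
 * (but never less than one), with the poll queues added on top when
 * computing dev->max_qid above.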
2163 */ 2164 result = queue_request_irq(adminq); 2165 if (result) 2166 return result; 2167 set_bit(NVMEQ_ENABLED, &adminq->flags); 2168 2169 result = nvme_create_io_queues(dev); 2170 if (result || dev->online_queues < 2) 2171 return result; 2172 2173 if (dev->online_queues - 1 < dev->max_qid) { 2174 nr_io_queues = dev->online_queues - 1; 2175 nvme_disable_io_queues(dev); 2176 nvme_suspend_io_queues(dev); 2177 goto retry; 2178 } 2179 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 2180 dev->io_queues[HCTX_TYPE_DEFAULT], 2181 dev->io_queues[HCTX_TYPE_READ], 2182 dev->io_queues[HCTX_TYPE_POLL]); 2183 return 0; 2184 } 2185 2186 static void nvme_del_queue_end(struct request *req, blk_status_t error) 2187 { 2188 struct nvme_queue *nvmeq = req->end_io_data; 2189 2190 blk_mq_free_request(req); 2191 complete(&nvmeq->delete_done); 2192 } 2193 2194 static void nvme_del_cq_end(struct request *req, blk_status_t error) 2195 { 2196 struct nvme_queue *nvmeq = req->end_io_data; 2197 2198 if (error) 2199 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2200 2201 nvme_del_queue_end(req, error); 2202 } 2203 2204 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2205 { 2206 struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2207 struct request *req; 2208 struct nvme_command cmd; 2209 2210 memset(&cmd, 0, sizeof(cmd)); 2211 cmd.delete_queue.opcode = opcode; 2212 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2213 2214 req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 2215 if (IS_ERR(req)) 2216 return PTR_ERR(req); 2217 2218 req->timeout = ADMIN_TIMEOUT; 2219 req->end_io_data = nvmeq; 2220 2221 init_completion(&nvmeq->delete_done); 2222 blk_execute_rq_nowait(q, NULL, req, false, 2223 opcode == nvme_admin_delete_cq ? 2224 nvme_del_cq_end : nvme_del_queue_end); 2225 return 0; 2226 } 2227 2228 static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2229 { 2230 int nr_queues = dev->online_queues - 1, sent = 0; 2231 unsigned long timeout; 2232 2233 retry: 2234 timeout = ADMIN_TIMEOUT; 2235 while (nr_queues > 0) { 2236 if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2237 break; 2238 nr_queues--; 2239 sent++; 2240 } 2241 while (sent) { 2242 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2243 2244 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 2245 timeout); 2246 if (timeout == 0) 2247 return false; 2248 2249 /* handle any remaining CQEs */ 2250 if (opcode == nvme_admin_delete_cq && 2251 !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags)) 2252 nvme_poll_irqdisable(nvmeq, -1); 2253 2254 sent--; 2255 if (nr_queues) 2256 goto retry; 2257 } 2258 return true; 2259 } 2260 2261 /* 2262 * return error value only when tagset allocation failed 2263 */ 2264 static int nvme_dev_add(struct nvme_dev *dev) 2265 { 2266 int ret; 2267 2268 if (!dev->ctrl.tagset) { 2269 dev->tagset.ops = &nvme_mq_ops; 2270 dev->tagset.nr_hw_queues = dev->online_queues - 1; 2271 dev->tagset.nr_maps = 2; /* default + read */ 2272 if (dev->io_queues[HCTX_TYPE_POLL]) 2273 dev->tagset.nr_maps++; 2274 dev->tagset.timeout = NVME_IO_TIMEOUT; 2275 dev->tagset.numa_node = dev_to_node(dev->dev); 2276 dev->tagset.queue_depth = 2277 min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; 2278 dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false); 2279 if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) { 2280 dev->tagset.cmd_size = max(dev->tagset.cmd_size, 2281 nvme_pci_cmd_size(dev, true)); 2282 } 2283 dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 2284 dev->tagset.driver_data 
= dev; 2285 2286 ret = blk_mq_alloc_tag_set(&dev->tagset); 2287 if (ret) { 2288 dev_warn(dev->ctrl.device, 2289 "IO queues tagset allocation failed %d\n", ret); 2290 return ret; 2291 } 2292 dev->ctrl.tagset = &dev->tagset; 2293 2294 nvme_dbbuf_set(dev); 2295 } else { 2296 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 2297 2298 /* Free previously allocated queues that are no longer usable */ 2299 nvme_free_queues(dev, dev->online_queues); 2300 } 2301 2302 return 0; 2303 } 2304 2305 static int nvme_pci_enable(struct nvme_dev *dev) 2306 { 2307 int result = -ENOMEM; 2308 struct pci_dev *pdev = to_pci_dev(dev->dev); 2309 2310 if (pci_enable_device_mem(pdev)) 2311 return result; 2312 2313 pci_set_master(pdev); 2314 2315 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 2316 dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) 2317 goto disable; 2318 2319 if (readl(dev->bar + NVME_REG_CSTS) == -1) { 2320 result = -ENODEV; 2321 goto disable; 2322 } 2323 2324 /* 2325 * Some devices and/or platforms don't advertise or work with INTx 2326 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2327 * adjust this later. 2328 */ 2329 result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2330 if (result < 0) 2331 return result; 2332 2333 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 2334 2335 dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2336 io_queue_depth); 2337 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 2338 dev->dbs = dev->bar + 4096; 2339 2340 /* 2341 * Temporary fix for the Apple controller found in the MacBook8,1 and 2342 * some MacBook7,1 to avoid controller resets and data loss. 2343 */ 2344 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 2345 dev->q_depth = 2; 2346 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 2347 "set queue depth=%u to work around controller resets\n", 2348 dev->q_depth); 2349 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2350 (pdev->device == 0xa821 || pdev->device == 0xa822) && 2351 NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2352 dev->q_depth = 64; 2353 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2354 "set queue depth=%u\n", dev->q_depth); 2355 } 2356 2357 nvme_map_cmb(dev); 2358 2359 pci_enable_pcie_error_reporting(pdev); 2360 pci_save_state(pdev); 2361 return 0; 2362 2363 disable: 2364 pci_disable_device(pdev); 2365 return result; 2366 } 2367 2368 static void nvme_dev_unmap(struct nvme_dev *dev) 2369 { 2370 if (dev->bar) 2371 iounmap(dev->bar); 2372 pci_release_mem_regions(to_pci_dev(dev->dev)); 2373 } 2374 2375 static void nvme_pci_disable(struct nvme_dev *dev) 2376 { 2377 struct pci_dev *pdev = to_pci_dev(dev->dev); 2378 2379 pci_free_irq_vectors(pdev); 2380 2381 if (pci_is_enabled(pdev)) { 2382 pci_disable_pcie_error_reporting(pdev); 2383 pci_disable_device(pdev); 2384 } 2385 } 2386 2387 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 2388 { 2389 bool dead = true; 2390 struct pci_dev *pdev = to_pci_dev(dev->dev); 2391 2392 mutex_lock(&dev->shutdown_lock); 2393 if (pci_is_enabled(pdev)) { 2394 u32 csts = readl(dev->bar + NVME_REG_CSTS); 2395 2396 if (dev->ctrl.state == NVME_CTRL_LIVE || 2397 dev->ctrl.state == NVME_CTRL_RESETTING) 2398 nvme_start_freeze(&dev->ctrl); 2399 dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 2400 pdev->error_state != pci_channel_io_normal); 2401 } 2402 2403 /* 2404 * Give the controller a chance to complete all entered requests if 2405 * doing a safe shutdown. 
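 * "Safe" here means the controller still looks functional: CSTS was
 * readable with no fatal status (CFS), the controller still reports
 * ready (RDY), and the PCI channel is healthy, so waiting (bounded by
 * NVME_IO_TIMEOUT) for the frozen queues to drain is worthwhile before
 * they are torn down.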
2406 */ 2407 if (!dead) { 2408 if (shutdown) 2409 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 2410 } 2411 2412 nvme_stop_queues(&dev->ctrl); 2413 2414 if (!dead && dev->ctrl.queue_count > 0) { 2415 nvme_disable_io_queues(dev); 2416 nvme_disable_admin_queue(dev, shutdown); 2417 } 2418 nvme_suspend_io_queues(dev); 2419 nvme_suspend_queue(&dev->queues[0]); 2420 nvme_pci_disable(dev); 2421 2422 blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 2423 blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); 2424 2425 /* 2426 * The driver will not be starting up queues again if shutting down so 2427 * must flush all entered requests to their failed completion to avoid 2428 * deadlocking blk-mq hot-cpu notifier. 2429 */ 2430 if (shutdown) 2431 nvme_start_queues(&dev->ctrl); 2432 mutex_unlock(&dev->shutdown_lock); 2433 } 2434 2435 static int nvme_setup_prp_pools(struct nvme_dev *dev) 2436 { 2437 dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 2438 PAGE_SIZE, PAGE_SIZE, 0); 2439 if (!dev->prp_page_pool) 2440 return -ENOMEM; 2441 2442 /* Optimisation for I/Os between 4k and 128k */ 2443 dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 2444 256, 256, 0); 2445 if (!dev->prp_small_pool) { 2446 dma_pool_destroy(dev->prp_page_pool); 2447 return -ENOMEM; 2448 } 2449 return 0; 2450 } 2451 2452 static void nvme_release_prp_pools(struct nvme_dev *dev) 2453 { 2454 dma_pool_destroy(dev->prp_page_pool); 2455 dma_pool_destroy(dev->prp_small_pool); 2456 } 2457 2458 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 2459 { 2460 struct nvme_dev *dev = to_nvme_dev(ctrl); 2461 2462 nvme_dbbuf_dma_free(dev); 2463 put_device(dev->dev); 2464 if (dev->tagset.tags) 2465 blk_mq_free_tag_set(&dev->tagset); 2466 if (dev->ctrl.admin_q) 2467 blk_put_queue(dev->ctrl.admin_q); 2468 kfree(dev->queues); 2469 free_opal_dev(dev->ctrl.opal_dev); 2470 mempool_destroy(dev->iod_mempool); 2471 kfree(dev); 2472 } 2473 2474 static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status) 2475 { 2476 dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status); 2477 2478 nvme_get_ctrl(&dev->ctrl); 2479 nvme_dev_disable(dev, false); 2480 nvme_kill_queues(&dev->ctrl); 2481 if (!queue_work(nvme_wq, &dev->remove_work)) 2482 nvme_put_ctrl(&dev->ctrl); 2483 } 2484 2485 static void nvme_reset_work(struct work_struct *work) 2486 { 2487 struct nvme_dev *dev = 2488 container_of(work, struct nvme_dev, ctrl.reset_work); 2489 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2490 int result = -ENODEV; 2491 enum nvme_ctrl_state new_state = NVME_CTRL_LIVE; 2492 2493 if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) 2494 goto out; 2495 2496 /* 2497 * If we're called to reset a live controller first shut it down before 2498 * moving on. 2499 */ 2500 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2501 nvme_dev_disable(dev, false); 2502 2503 mutex_lock(&dev->shutdown_lock); 2504 result = nvme_pci_enable(dev); 2505 if (result) 2506 goto out_unlock; 2507 2508 result = nvme_pci_configure_admin_queue(dev); 2509 if (result) 2510 goto out_unlock; 2511 2512 result = nvme_alloc_admin_tags(dev); 2513 if (result) 2514 goto out_unlock; 2515 2516 /* 2517 * Limit the max command size to prevent iod->sg allocations going 2518 * over a single page. 
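 *
 * In units of 512-byte sectors that limit is NVME_MAX_KB_SZ << 1
 * (4096 KiB -> 8192 sectors), and NVME_MAX_SEGS bounds the number of
 * scatter/gather segments a single command may carry.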
2519 */
2520 dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
2521 dev->ctrl.max_segments = NVME_MAX_SEGS;
2522 mutex_unlock(&dev->shutdown_lock);
2523
2524 /*
2525 * Mark the controller as CONNECTING (the state introduced by the
2526 * nvme-fc/rdma transports) while the initialization procedure runs.
2527 */
2528 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2529 dev_warn(dev->ctrl.device,
2530 "failed to mark controller CONNECTING\n");
2531 goto out;
2532 }
2533
2534 result = nvme_init_identify(&dev->ctrl);
2535 if (result)
2536 goto out;
2537
2538 if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
2539 if (!dev->ctrl.opal_dev)
2540 dev->ctrl.opal_dev =
2541 init_opal_dev(&dev->ctrl, &nvme_sec_submit);
2542 else if (was_suspend)
2543 opal_unlock_from_suspend(dev->ctrl.opal_dev);
2544 } else {
2545 free_opal_dev(dev->ctrl.opal_dev);
2546 dev->ctrl.opal_dev = NULL;
2547 }
2548
2549 if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
2550 result = nvme_dbbuf_dma_alloc(dev);
2551 if (result)
2552 dev_warn(dev->dev,
2553 "unable to allocate dma for dbbuf\n");
2554 }
2555
2556 if (dev->ctrl.hmpre) {
2557 result = nvme_setup_host_mem(dev);
2558 if (result < 0)
2559 goto out;
2560 }
2561
2562 result = nvme_setup_io_queues(dev);
2563 if (result)
2564 goto out;
2565
2566 /*
2567 * Keep the controller around but remove all namespaces if we don't have
2568 * any working I/O queue.
2569 */
2570 if (dev->online_queues < 2) {
2571 dev_warn(dev->ctrl.device, "IO queues not created\n");
2572 nvme_kill_queues(&dev->ctrl);
2573 nvme_remove_namespaces(&dev->ctrl);
2574 new_state = NVME_CTRL_ADMIN_ONLY;
2575 } else {
2576 nvme_start_queues(&dev->ctrl);
2577 nvme_wait_freeze(&dev->ctrl);
2578 /* we only hit this if tagset allocation fails */
2579 if (nvme_dev_add(dev))
2580 new_state = NVME_CTRL_ADMIN_ONLY;
2581 nvme_unfreeze(&dev->ctrl);
2582 }
2583
2584 /*
2585 * If only the admin queue is live, keep it for further investigation
2586 * or recovery.
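 * In that case the controller is left in NVME_CTRL_ADMIN_ONLY rather
 * than being torn down, so the admin queue remains usable for
 * diagnostics.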
2587 */ 2588 if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) { 2589 dev_warn(dev->ctrl.device, 2590 "failed to mark controller state %d\n", new_state); 2591 goto out; 2592 } 2593 2594 nvme_start_ctrl(&dev->ctrl); 2595 return; 2596 2597 out_unlock: 2598 mutex_unlock(&dev->shutdown_lock); 2599 out: 2600 nvme_remove_dead_ctrl(dev, result); 2601 } 2602 2603 static void nvme_remove_dead_ctrl_work(struct work_struct *work) 2604 { 2605 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 2606 struct pci_dev *pdev = to_pci_dev(dev->dev); 2607 2608 if (pci_get_drvdata(pdev)) 2609 device_release_driver(&pdev->dev); 2610 nvme_put_ctrl(&dev->ctrl); 2611 } 2612 2613 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 2614 { 2615 *val = readl(to_nvme_dev(ctrl)->bar + off); 2616 return 0; 2617 } 2618 2619 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 2620 { 2621 writel(val, to_nvme_dev(ctrl)->bar + off); 2622 return 0; 2623 } 2624 2625 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 2626 { 2627 *val = readq(to_nvme_dev(ctrl)->bar + off); 2628 return 0; 2629 } 2630 2631 static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 2632 { 2633 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 2634 2635 return snprintf(buf, size, "%s", dev_name(&pdev->dev)); 2636 } 2637 2638 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 2639 .name = "pcie", 2640 .module = THIS_MODULE, 2641 .flags = NVME_F_METADATA_SUPPORTED | 2642 NVME_F_PCI_P2PDMA, 2643 .reg_read32 = nvme_pci_reg_read32, 2644 .reg_write32 = nvme_pci_reg_write32, 2645 .reg_read64 = nvme_pci_reg_read64, 2646 .free_ctrl = nvme_pci_free_ctrl, 2647 .submit_async_event = nvme_pci_submit_async_event, 2648 .get_address = nvme_pci_get_address, 2649 }; 2650 2651 static int nvme_dev_map(struct nvme_dev *dev) 2652 { 2653 struct pci_dev *pdev = to_pci_dev(dev->dev); 2654 2655 if (pci_request_mem_regions(pdev, "nvme")) 2656 return -ENODEV; 2657 2658 if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 2659 goto release; 2660 2661 return 0; 2662 release: 2663 pci_release_mem_regions(pdev); 2664 return -ENODEV; 2665 } 2666 2667 static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) 2668 { 2669 if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 2670 /* 2671 * Several Samsung devices seem to drop off the PCIe bus 2672 * randomly when APST is on and uses the deepest sleep state. 2673 * This has been observed on a Samsung "SM951 NVMe SAMSUNG 2674 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 2675 * 950 PRO 256GB", but it seems to be restricted to two Dell 2676 * laptops. 
2677 */ 2678 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 2679 (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 2680 dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 2681 return NVME_QUIRK_NO_DEEPEST_PS; 2682 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { 2683 /* 2684 * Samsung SSD 960 EVO drops off the PCIe bus after system 2685 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as 2686 * within few minutes after bootup on a Coffee Lake board - 2687 * ASUS PRIME Z370-A 2688 */ 2689 if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && 2690 (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || 2691 dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) 2692 return NVME_QUIRK_NO_APST; 2693 } 2694 2695 return 0; 2696 } 2697 2698 static void nvme_async_probe(void *data, async_cookie_t cookie) 2699 { 2700 struct nvme_dev *dev = data; 2701 2702 nvme_reset_ctrl_sync(&dev->ctrl); 2703 flush_work(&dev->ctrl.scan_work); 2704 nvme_put_ctrl(&dev->ctrl); 2705 } 2706 2707 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2708 { 2709 int node, result = -ENOMEM; 2710 struct nvme_dev *dev; 2711 unsigned long quirks = id->driver_data; 2712 size_t alloc_size; 2713 2714 node = dev_to_node(&pdev->dev); 2715 if (node == NUMA_NO_NODE) 2716 set_dev_node(&pdev->dev, first_memory_node); 2717 2718 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 2719 if (!dev) 2720 return -ENOMEM; 2721 2722 dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue), 2723 GFP_KERNEL, node); 2724 if (!dev->queues) 2725 goto free; 2726 2727 dev->dev = get_device(&pdev->dev); 2728 pci_set_drvdata(pdev, dev); 2729 2730 result = nvme_dev_map(dev); 2731 if (result) 2732 goto put_pci; 2733 2734 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 2735 INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 2736 mutex_init(&dev->shutdown_lock); 2737 2738 result = nvme_setup_prp_pools(dev); 2739 if (result) 2740 goto unmap; 2741 2742 quirks |= check_vendor_combination_bug(pdev); 2743 2744 /* 2745 * Double check that our mempool alloc size will cover the biggest 2746 * command we support. 
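 *
 * The per-request iod allocation is sized for the largest command we
 * accept (NVME_MAX_KB_SZ worth of data in up to NVME_MAX_SEGS segments);
 * the WARN_ON_ONCE below fires if that size ever exceeds PAGE_SIZE, and
 * the mempool elements are created from the same calculation.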
2747 */ 2748 alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ, 2749 NVME_MAX_SEGS, true); 2750 WARN_ON_ONCE(alloc_size > PAGE_SIZE); 2751 2752 dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, 2753 mempool_kfree, 2754 (void *) alloc_size, 2755 GFP_KERNEL, node); 2756 if (!dev->iod_mempool) { 2757 result = -ENOMEM; 2758 goto release_pools; 2759 } 2760 2761 result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 2762 quirks); 2763 if (result) 2764 goto release_mempool; 2765 2766 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 2767 2768 nvme_get_ctrl(&dev->ctrl); 2769 async_schedule(nvme_async_probe, dev); 2770 2771 return 0; 2772 2773 release_mempool: 2774 mempool_destroy(dev->iod_mempool); 2775 release_pools: 2776 nvme_release_prp_pools(dev); 2777 unmap: 2778 nvme_dev_unmap(dev); 2779 put_pci: 2780 put_device(dev->dev); 2781 free: 2782 kfree(dev->queues); 2783 kfree(dev); 2784 return result; 2785 } 2786 2787 static void nvme_reset_prepare(struct pci_dev *pdev) 2788 { 2789 struct nvme_dev *dev = pci_get_drvdata(pdev); 2790 nvme_dev_disable(dev, false); 2791 } 2792 2793 static void nvme_reset_done(struct pci_dev *pdev) 2794 { 2795 struct nvme_dev *dev = pci_get_drvdata(pdev); 2796 nvme_reset_ctrl_sync(&dev->ctrl); 2797 } 2798 2799 static void nvme_shutdown(struct pci_dev *pdev) 2800 { 2801 struct nvme_dev *dev = pci_get_drvdata(pdev); 2802 nvme_dev_disable(dev, true); 2803 } 2804 2805 /* 2806 * The driver's remove may be called on a device in a partially initialized 2807 * state. This function must not have any dependencies on the device state in 2808 * order to proceed. 2809 */ 2810 static void nvme_remove(struct pci_dev *pdev) 2811 { 2812 struct nvme_dev *dev = pci_get_drvdata(pdev); 2813 2814 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2815 pci_set_drvdata(pdev, NULL); 2816 2817 if (!pci_device_is_present(pdev)) { 2818 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 2819 nvme_dev_disable(dev, true); 2820 nvme_dev_remove_admin(dev); 2821 } 2822 2823 flush_work(&dev->ctrl.reset_work); 2824 nvme_stop_ctrl(&dev->ctrl); 2825 nvme_remove_namespaces(&dev->ctrl); 2826 nvme_dev_disable(dev, true); 2827 nvme_release_cmb(dev); 2828 nvme_free_host_mem(dev); 2829 nvme_dev_remove_admin(dev); 2830 nvme_free_queues(dev, 0); 2831 nvme_uninit_ctrl(&dev->ctrl); 2832 nvme_release_prp_pools(dev); 2833 nvme_dev_unmap(dev); 2834 nvme_put_ctrl(&dev->ctrl); 2835 } 2836 2837 #ifdef CONFIG_PM_SLEEP 2838 static int nvme_suspend(struct device *dev) 2839 { 2840 struct pci_dev *pdev = to_pci_dev(dev); 2841 struct nvme_dev *ndev = pci_get_drvdata(pdev); 2842 2843 nvme_dev_disable(ndev, true); 2844 return 0; 2845 } 2846 2847 static int nvme_resume(struct device *dev) 2848 { 2849 struct pci_dev *pdev = to_pci_dev(dev); 2850 struct nvme_dev *ndev = pci_get_drvdata(pdev); 2851 2852 nvme_reset_ctrl(&ndev->ctrl); 2853 return 0; 2854 } 2855 #endif 2856 2857 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); 2858 2859 static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 2860 pci_channel_state_t state) 2861 { 2862 struct nvme_dev *dev = pci_get_drvdata(pdev); 2863 2864 /* 2865 * A frozen channel requires a reset. When detected, this method will 2866 * shutdown the controller to quiesce. The controller will be restarted 2867 * after the slot reset through driver's slot_reset callback. 
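 *
 * Concretely: pci_channel_io_normal is reported as recoverable without
 * intervention, pci_channel_io_frozen disables the controller and asks
 * for a slot reset, and pci_channel_io_perm_failure requests a
 * disconnect.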
2868 */ 2869 switch (state) { 2870 case pci_channel_io_normal: 2871 return PCI_ERS_RESULT_CAN_RECOVER; 2872 case pci_channel_io_frozen: 2873 dev_warn(dev->ctrl.device, 2874 "frozen state error detected, reset controller\n"); 2875 nvme_dev_disable(dev, false); 2876 return PCI_ERS_RESULT_NEED_RESET; 2877 case pci_channel_io_perm_failure: 2878 dev_warn(dev->ctrl.device, 2879 "failure state error detected, request disconnect\n"); 2880 return PCI_ERS_RESULT_DISCONNECT; 2881 } 2882 return PCI_ERS_RESULT_NEED_RESET; 2883 } 2884 2885 static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 2886 { 2887 struct nvme_dev *dev = pci_get_drvdata(pdev); 2888 2889 dev_info(dev->ctrl.device, "restart after slot reset\n"); 2890 pci_restore_state(pdev); 2891 nvme_reset_ctrl(&dev->ctrl); 2892 return PCI_ERS_RESULT_RECOVERED; 2893 } 2894 2895 static void nvme_error_resume(struct pci_dev *pdev) 2896 { 2897 struct nvme_dev *dev = pci_get_drvdata(pdev); 2898 2899 flush_work(&dev->ctrl.reset_work); 2900 } 2901 2902 static const struct pci_error_handlers nvme_err_handler = { 2903 .error_detected = nvme_error_detected, 2904 .slot_reset = nvme_slot_reset, 2905 .resume = nvme_error_resume, 2906 .reset_prepare = nvme_reset_prepare, 2907 .reset_done = nvme_reset_done, 2908 }; 2909 2910 static const struct pci_device_id nvme_id_table[] = { 2911 { PCI_VDEVICE(INTEL, 0x0953), 2912 .driver_data = NVME_QUIRK_STRIPE_SIZE | 2913 NVME_QUIRK_DEALLOCATE_ZEROES, }, 2914 { PCI_VDEVICE(INTEL, 0x0a53), 2915 .driver_data = NVME_QUIRK_STRIPE_SIZE | 2916 NVME_QUIRK_DEALLOCATE_ZEROES, }, 2917 { PCI_VDEVICE(INTEL, 0x0a54), 2918 .driver_data = NVME_QUIRK_STRIPE_SIZE | 2919 NVME_QUIRK_DEALLOCATE_ZEROES, }, 2920 { PCI_VDEVICE(INTEL, 0x0a55), 2921 .driver_data = NVME_QUIRK_STRIPE_SIZE | 2922 NVME_QUIRK_DEALLOCATE_ZEROES, }, 2923 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 2924 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 2925 NVME_QUIRK_MEDIUM_PRIO_SQ }, 2926 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 2927 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 2928 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2929 .driver_data = NVME_QUIRK_IDENTIFY_CNS | 2930 NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 2931 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 2932 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2933 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 2934 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2935 { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ 2936 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2937 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 2938 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2939 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ 2940 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2941 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ 2942 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2943 { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */ 2944 .driver_data = NVME_QUIRK_LIGHTNVM, }, 2945 { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ 2946 .driver_data = NVME_QUIRK_LIGHTNVM, }, 2947 { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ 2948 .driver_data = NVME_QUIRK_LIGHTNVM, }, 2949 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 2950 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, 2951 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 2952 { 0, } 2953 }; 2954 MODULE_DEVICE_TABLE(pci, nvme_id_table); 2955 2956 static struct pci_driver nvme_driver = { 2957 .name = "nvme", 2958 .id_table = nvme_id_table, 2959 .probe = 
nvme_probe, 2960 .remove = nvme_remove, 2961 .shutdown = nvme_shutdown, 2962 .driver = { 2963 .pm = &nvme_dev_pm_ops, 2964 }, 2965 .sriov_configure = pci_sriov_configure_simple, 2966 .err_handler = &nvme_err_handler, 2967 }; 2968 2969 static int __init nvme_init(void) 2970 { 2971 BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); 2972 return pci_register_driver(&nvme_driver); 2973 } 2974 2975 static void __exit nvme_exit(void) 2976 { 2977 pci_unregister_driver(&nvme_driver); 2978 flush_workqueue(nvme_wq); 2979 _nvme_check_size(); 2980 } 2981 2982 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 2983 MODULE_LICENSE("GPL"); 2984 MODULE_VERSION("1.0"); 2985 module_init(nvme_init); 2986 module_exit(nvme_exit); 2987