1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVM Express device driver 4 * Copyright (c) 2011-2014, Intel Corporation. 5 */ 6 7 #include <linux/acpi.h> 8 #include <linux/aer.h> 9 #include <linux/async.h> 10 #include <linux/blkdev.h> 11 #include <linux/blk-mq.h> 12 #include <linux/blk-mq-pci.h> 13 #include <linux/blk-integrity.h> 14 #include <linux/dmi.h> 15 #include <linux/init.h> 16 #include <linux/interrupt.h> 17 #include <linux/io.h> 18 #include <linux/memremap.h> 19 #include <linux/mm.h> 20 #include <linux/module.h> 21 #include <linux/mutex.h> 22 #include <linux/once.h> 23 #include <linux/pci.h> 24 #include <linux/suspend.h> 25 #include <linux/t10-pi.h> 26 #include <linux/types.h> 27 #include <linux/io-64-nonatomic-lo-hi.h> 28 #include <linux/io-64-nonatomic-hi-lo.h> 29 #include <linux/sed-opal.h> 30 #include <linux/pci-p2pdma.h> 31 32 #include "trace.h" 33 #include "nvme.h" 34 35 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) 36 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) 37 38 #define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc)) 39 40 /* 41 * These can be higher, but we need to ensure that any command doesn't 42 * require an sg allocation that needs more than a page of data. 43 */ 44 #define NVME_MAX_KB_SZ 4096 45 #define NVME_MAX_SEGS 127 46 47 static int use_threaded_interrupts; 48 module_param(use_threaded_interrupts, int, 0444); 49 50 static bool use_cmb_sqes = true; 51 module_param(use_cmb_sqes, bool, 0444); 52 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); 53 54 static unsigned int max_host_mem_size_mb = 128; 55 module_param(max_host_mem_size_mb, uint, 0444); 56 MODULE_PARM_DESC(max_host_mem_size_mb, 57 "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); 58 59 static unsigned int sgl_threshold = SZ_32K; 60 module_param(sgl_threshold, uint, 0644); 61 MODULE_PARM_DESC(sgl_threshold, 62 "Use SGLs when average request segment size is larger or equal to " 63 "this size. Use 0 to disable SGLs."); 64 65 #define NVME_PCI_MIN_QUEUE_SIZE 2 66 #define NVME_PCI_MAX_QUEUE_SIZE 4095 67 static int io_queue_depth_set(const char *val, const struct kernel_param *kp); 68 static const struct kernel_param_ops io_queue_depth_ops = { 69 .set = io_queue_depth_set, 70 .get = param_get_uint, 71 }; 72 73 static unsigned int io_queue_depth = 1024; 74 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); 75 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096"); 76 77 static int io_queue_count_set(const char *val, const struct kernel_param *kp) 78 { 79 unsigned int n; 80 int ret; 81 82 ret = kstrtouint(val, 10, &n); 83 if (ret != 0 || n > num_possible_cpus()) 84 return -EINVAL; 85 return param_set_uint(val, kp); 86 } 87 88 static const struct kernel_param_ops io_queue_count_ops = { 89 .set = io_queue_count_set, 90 .get = param_get_uint, 91 }; 92 93 static unsigned int write_queues; 94 module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644); 95 MODULE_PARM_DESC(write_queues, 96 "Number of queues to use for writes. 
If not set, reads and writes " 97 "will share a queue set."); 98 99 static unsigned int poll_queues; 100 module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644); 101 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); 102 103 static bool noacpi; 104 module_param(noacpi, bool, 0444); 105 MODULE_PARM_DESC(noacpi, "disable acpi bios quirks"); 106 107 struct nvme_dev; 108 struct nvme_queue; 109 110 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 111 static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode); 112 113 /* 114 * Represents an NVM Express device. Each nvme_dev is a PCI function. 115 */ 116 struct nvme_dev { 117 struct nvme_queue *queues; 118 struct blk_mq_tag_set tagset; 119 struct blk_mq_tag_set admin_tagset; 120 u32 __iomem *dbs; 121 struct device *dev; 122 struct dma_pool *prp_page_pool; 123 struct dma_pool *prp_small_pool; 124 unsigned online_queues; 125 unsigned max_qid; 126 unsigned io_queues[HCTX_MAX_TYPES]; 127 unsigned int num_vecs; 128 u32 q_depth; 129 int io_sqes; 130 u32 db_stride; 131 void __iomem *bar; 132 unsigned long bar_mapped_size; 133 struct work_struct remove_work; 134 struct mutex shutdown_lock; 135 bool subsystem; 136 u64 cmb_size; 137 bool cmb_use_sqes; 138 u32 cmbsz; 139 u32 cmbloc; 140 struct nvme_ctrl ctrl; 141 u32 last_ps; 142 bool hmb; 143 144 mempool_t *iod_mempool; 145 146 /* shadow doorbell buffer support: */ 147 u32 *dbbuf_dbs; 148 dma_addr_t dbbuf_dbs_dma_addr; 149 u32 *dbbuf_eis; 150 dma_addr_t dbbuf_eis_dma_addr; 151 152 /* host memory buffer support: */ 153 u64 host_mem_size; 154 u32 nr_host_mem_descs; 155 dma_addr_t host_mem_descs_dma; 156 struct nvme_host_mem_buf_desc *host_mem_descs; 157 void **host_mem_desc_bufs; 158 unsigned int nr_allocated_queues; 159 unsigned int nr_write_queues; 160 unsigned int nr_poll_queues; 161 162 bool attrs_added; 163 }; 164 165 static int io_queue_depth_set(const char *val, const struct kernel_param *kp) 166 { 167 return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE, 168 NVME_PCI_MAX_QUEUE_SIZE); 169 } 170 171 static inline unsigned int sq_idx(unsigned int qid, u32 stride) 172 { 173 return qid * 2 * stride; 174 } 175 176 static inline unsigned int cq_idx(unsigned int qid, u32 stride) 177 { 178 return (qid * 2 + 1) * stride; 179 } 180 181 static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) 182 { 183 return container_of(ctrl, struct nvme_dev, ctrl); 184 } 185 186 /* 187 * An NVM Express queue. Each device has at least two (one for admin 188 * commands and one for I/O commands). 189 */ 190 struct nvme_queue { 191 struct nvme_dev *dev; 192 spinlock_t sq_lock; 193 void *sq_cmds; 194 /* only used for poll queues: */ 195 spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; 196 struct nvme_completion *cqes; 197 dma_addr_t sq_dma_addr; 198 dma_addr_t cq_dma_addr; 199 u32 __iomem *q_db; 200 u32 q_depth; 201 u16 cq_vector; 202 u16 sq_tail; 203 u16 last_sq_tail; 204 u16 cq_head; 205 u16 qid; 206 u8 cq_phase; 207 u8 sqes; 208 unsigned long flags; 209 #define NVMEQ_ENABLED 0 210 #define NVMEQ_SQ_CMB 1 211 #define NVMEQ_DELETE_ERROR 2 212 #define NVMEQ_POLLED 3 213 u32 *dbbuf_sq_db; 214 u32 *dbbuf_cq_db; 215 u32 *dbbuf_sq_ei; 216 u32 *dbbuf_cq_ei; 217 struct completion delete_done; 218 }; 219 220 /* 221 * The nvme_iod describes the data in an I/O. 222 * 223 * The sg pointer contains the list of PRP/SGL chunk allocations in addition 224 * to the actual struct scatterlist. 
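 *
 * When a scatterlist is needed it is taken from iod_mempool;
 * nvme_pci_iod_alloc_size() sizes that buffer so the array of per-request
 * PRP-list/SGL-segment page pointers (returned by nvme_pci_iod_list()) can
 * live directly behind the scatterlist entries actually used for the request.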
225 */ 226 struct nvme_iod { 227 struct nvme_request req; 228 struct nvme_command cmd; 229 bool use_sgl; 230 bool aborted; 231 s8 nr_allocations; /* PRP list pool allocations. 0 means small 232 pool in use */ 233 unsigned int dma_len; /* length of single DMA segment mapping */ 234 dma_addr_t first_dma; 235 dma_addr_t meta_dma; 236 struct sg_table sgt; 237 }; 238 239 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) 240 { 241 return dev->nr_allocated_queues * 8 * dev->db_stride; 242 } 243 244 static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) 245 { 246 unsigned int mem_size = nvme_dbbuf_size(dev); 247 248 if (dev->dbbuf_dbs) { 249 /* 250 * Clear the dbbuf memory so the driver doesn't observe stale 251 * values from the previous instantiation. 252 */ 253 memset(dev->dbbuf_dbs, 0, mem_size); 254 memset(dev->dbbuf_eis, 0, mem_size); 255 return 0; 256 } 257 258 dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, 259 &dev->dbbuf_dbs_dma_addr, 260 GFP_KERNEL); 261 if (!dev->dbbuf_dbs) 262 return -ENOMEM; 263 dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, 264 &dev->dbbuf_eis_dma_addr, 265 GFP_KERNEL); 266 if (!dev->dbbuf_eis) { 267 dma_free_coherent(dev->dev, mem_size, 268 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 269 dev->dbbuf_dbs = NULL; 270 return -ENOMEM; 271 } 272 273 return 0; 274 } 275 276 static void nvme_dbbuf_dma_free(struct nvme_dev *dev) 277 { 278 unsigned int mem_size = nvme_dbbuf_size(dev); 279 280 if (dev->dbbuf_dbs) { 281 dma_free_coherent(dev->dev, mem_size, 282 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 283 dev->dbbuf_dbs = NULL; 284 } 285 if (dev->dbbuf_eis) { 286 dma_free_coherent(dev->dev, mem_size, 287 dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); 288 dev->dbbuf_eis = NULL; 289 } 290 } 291 292 static void nvme_dbbuf_init(struct nvme_dev *dev, 293 struct nvme_queue *nvmeq, int qid) 294 { 295 if (!dev->dbbuf_dbs || !qid) 296 return; 297 298 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; 299 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; 300 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; 301 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; 302 } 303 304 static void nvme_dbbuf_free(struct nvme_queue *nvmeq) 305 { 306 if (!nvmeq->qid) 307 return; 308 309 nvmeq->dbbuf_sq_db = NULL; 310 nvmeq->dbbuf_cq_db = NULL; 311 nvmeq->dbbuf_sq_ei = NULL; 312 nvmeq->dbbuf_cq_ei = NULL; 313 } 314 315 static void nvme_dbbuf_set(struct nvme_dev *dev) 316 { 317 struct nvme_command c = { }; 318 unsigned int i; 319 320 if (!dev->dbbuf_dbs) 321 return; 322 323 c.dbbuf.opcode = nvme_admin_dbbuf; 324 c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); 325 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 326 327 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 328 dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); 329 /* Free memory and continue on */ 330 nvme_dbbuf_dma_free(dev); 331 332 for (i = 1; i <= dev->online_queues; i++) 333 nvme_dbbuf_free(&dev->queues[i]); 334 } 335 } 336 337 static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) 338 { 339 return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); 340 } 341 342 /* Update dbbuf and return true if an MMIO is required */ 343 static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, 344 volatile u32 *dbbuf_ei) 345 { 346 if (dbbuf_db) { 347 u16 old_value; 348 349 /* 350 * Ensure that the queue is written before updating 351 * the doorbell in memory 352 */ 353 wmb(); 354 355 old_value = 
*dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory. The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Will slightly overestimate the number of pages needed. This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_pci_npages_prp(void)
{
	unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
				      NVME_CTRL_PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

/*
 * Calculates the number of pages needed for the SGL segments. For example, a
 * 4k page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(void)
{
	return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
			PAGE_SIZE);
}

static size_t nvme_pci_iod_alloc_size(void)
{
	size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());

	return sizeof(__le64 *) * npages +
		sizeof(struct scatterlist) * NVME_MAX_SEGS;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_pci_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = &dev->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}
}

/*
 * Write sq tail if we are asked to, or if the next command would wrap.
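 *
 * Deferring the MMIO until the block layer marks the last request of a batch
 * (or until the tail would wrap past last_sq_tail) lets back-to-back
 * submissions share a single doorbell write. With shadow doorbells enabled,
 * nvme_dbbuf_update_and_check_event() can elide the write entirely: the MMIO
 * is only needed when the controller's advertised event index falls within
 * the tail values covered by this update, e.g. old tail 4, new tail 8, event
 * index 5 gives (u16)(8 - 5 - 1) < (u16)(8 - 4), so the doorbell is written;
 * otherwise the controller is already polling the shadow buffer.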
479 */ 480 static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) 481 { 482 if (!write_sq) { 483 u16 next_tail = nvmeq->sq_tail + 1; 484 485 if (next_tail == nvmeq->q_depth) 486 next_tail = 0; 487 if (next_tail != nvmeq->last_sq_tail) 488 return; 489 } 490 491 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, 492 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) 493 writel(nvmeq->sq_tail, nvmeq->q_db); 494 nvmeq->last_sq_tail = nvmeq->sq_tail; 495 } 496 497 static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq, 498 struct nvme_command *cmd) 499 { 500 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), 501 absolute_pointer(cmd), sizeof(*cmd)); 502 if (++nvmeq->sq_tail == nvmeq->q_depth) 503 nvmeq->sq_tail = 0; 504 } 505 506 static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) 507 { 508 struct nvme_queue *nvmeq = hctx->driver_data; 509 510 spin_lock(&nvmeq->sq_lock); 511 if (nvmeq->sq_tail != nvmeq->last_sq_tail) 512 nvme_write_sq_db(nvmeq, true); 513 spin_unlock(&nvmeq->sq_lock); 514 } 515 516 static void **nvme_pci_iod_list(struct request *req) 517 { 518 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 519 return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req)); 520 } 521 522 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) 523 { 524 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 525 int nseg = blk_rq_nr_phys_segments(req); 526 unsigned int avg_seg_size; 527 528 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); 529 530 if (!nvme_ctrl_sgl_supported(&dev->ctrl)) 531 return false; 532 if (!nvmeq->qid) 533 return false; 534 if (!sgl_threshold || avg_seg_size < sgl_threshold) 535 return false; 536 return true; 537 } 538 539 static void nvme_free_prps(struct nvme_dev *dev, struct request *req) 540 { 541 const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; 542 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 543 dma_addr_t dma_addr = iod->first_dma; 544 int i; 545 546 for (i = 0; i < iod->nr_allocations; i++) { 547 __le64 *prp_list = nvme_pci_iod_list(req)[i]; 548 dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); 549 550 dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); 551 dma_addr = next_dma_addr; 552 } 553 } 554 555 static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) 556 { 557 const int last_sg = SGES_PER_PAGE - 1; 558 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 559 dma_addr_t dma_addr = iod->first_dma; 560 int i; 561 562 for (i = 0; i < iod->nr_allocations; i++) { 563 struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i]; 564 dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr); 565 566 dma_pool_free(dev->prp_page_pool, sg_list, dma_addr); 567 dma_addr = next_dma_addr; 568 } 569 } 570 571 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) 572 { 573 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 574 575 if (iod->dma_len) { 576 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, 577 rq_dma_dir(req)); 578 return; 579 } 580 581 WARN_ON_ONCE(!iod->sgt.nents); 582 583 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); 584 585 if (iod->nr_allocations == 0) 586 dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], 587 iod->first_dma); 588 else if (iod->use_sgl) 589 nvme_free_sgls(dev, req); 590 else 591 nvme_free_prps(dev, req); 592 mempool_free(iod->sgt.sgl, dev->iod_mempool); 593 } 594 595 static void nvme_print_sgl(struct scatterlist *sgl, int nents) 596 { 597 int i; 598 struct scatterlist *sg; 599 600 for_each_sg(sgl, 
sg, nents, i) { 601 dma_addr_t phys = sg_phys(sg); 602 pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " 603 "dma_address:%pad dma_length:%d\n", 604 i, &phys, sg->offset, sg->length, &sg_dma_address(sg), 605 sg_dma_len(sg)); 606 } 607 } 608 609 static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, 610 struct request *req, struct nvme_rw_command *cmnd) 611 { 612 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 613 struct dma_pool *pool; 614 int length = blk_rq_payload_bytes(req); 615 struct scatterlist *sg = iod->sgt.sgl; 616 int dma_len = sg_dma_len(sg); 617 u64 dma_addr = sg_dma_address(sg); 618 int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); 619 __le64 *prp_list; 620 void **list = nvme_pci_iod_list(req); 621 dma_addr_t prp_dma; 622 int nprps, i; 623 624 length -= (NVME_CTRL_PAGE_SIZE - offset); 625 if (length <= 0) { 626 iod->first_dma = 0; 627 goto done; 628 } 629 630 dma_len -= (NVME_CTRL_PAGE_SIZE - offset); 631 if (dma_len) { 632 dma_addr += (NVME_CTRL_PAGE_SIZE - offset); 633 } else { 634 sg = sg_next(sg); 635 dma_addr = sg_dma_address(sg); 636 dma_len = sg_dma_len(sg); 637 } 638 639 if (length <= NVME_CTRL_PAGE_SIZE) { 640 iod->first_dma = dma_addr; 641 goto done; 642 } 643 644 nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); 645 if (nprps <= (256 / 8)) { 646 pool = dev->prp_small_pool; 647 iod->nr_allocations = 0; 648 } else { 649 pool = dev->prp_page_pool; 650 iod->nr_allocations = 1; 651 } 652 653 prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 654 if (!prp_list) { 655 iod->nr_allocations = -1; 656 return BLK_STS_RESOURCE; 657 } 658 list[0] = prp_list; 659 iod->first_dma = prp_dma; 660 i = 0; 661 for (;;) { 662 if (i == NVME_CTRL_PAGE_SIZE >> 3) { 663 __le64 *old_prp_list = prp_list; 664 prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 665 if (!prp_list) 666 goto free_prps; 667 list[iod->nr_allocations++] = prp_list; 668 prp_list[0] = old_prp_list[i - 1]; 669 old_prp_list[i - 1] = cpu_to_le64(prp_dma); 670 i = 1; 671 } 672 prp_list[i++] = cpu_to_le64(dma_addr); 673 dma_len -= NVME_CTRL_PAGE_SIZE; 674 dma_addr += NVME_CTRL_PAGE_SIZE; 675 length -= NVME_CTRL_PAGE_SIZE; 676 if (length <= 0) 677 break; 678 if (dma_len > 0) 679 continue; 680 if (unlikely(dma_len < 0)) 681 goto bad_sgl; 682 sg = sg_next(sg); 683 dma_addr = sg_dma_address(sg); 684 dma_len = sg_dma_len(sg); 685 } 686 done: 687 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); 688 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); 689 return BLK_STS_OK; 690 free_prps: 691 nvme_free_prps(dev, req); 692 return BLK_STS_RESOURCE; 693 bad_sgl: 694 WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), 695 "Invalid SGL for payload:%d nents:%d\n", 696 blk_rq_payload_bytes(req), iod->sgt.nents); 697 return BLK_STS_IOERR; 698 } 699 700 static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, 701 struct scatterlist *sg) 702 { 703 sge->addr = cpu_to_le64(sg_dma_address(sg)); 704 sge->length = cpu_to_le32(sg_dma_len(sg)); 705 sge->type = NVME_SGL_FMT_DATA_DESC << 4; 706 } 707 708 static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, 709 dma_addr_t dma_addr, int entries) 710 { 711 sge->addr = cpu_to_le64(dma_addr); 712 if (entries < SGES_PER_PAGE) { 713 sge->length = cpu_to_le32(entries * sizeof(*sge)); 714 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; 715 } else { 716 sge->length = cpu_to_le32(PAGE_SIZE); 717 sge->type = NVME_SGL_FMT_SEG_DESC << 4; 718 } 719 } 720 721 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, 722 struct request *req, struct nvme_rw_command *cmd) 723 
{ 724 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 725 struct dma_pool *pool; 726 struct nvme_sgl_desc *sg_list; 727 struct scatterlist *sg = iod->sgt.sgl; 728 unsigned int entries = iod->sgt.nents; 729 dma_addr_t sgl_dma; 730 int i = 0; 731 732 /* setting the transfer type as SGL */ 733 cmd->flags = NVME_CMD_SGL_METABUF; 734 735 if (entries == 1) { 736 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); 737 return BLK_STS_OK; 738 } 739 740 if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { 741 pool = dev->prp_small_pool; 742 iod->nr_allocations = 0; 743 } else { 744 pool = dev->prp_page_pool; 745 iod->nr_allocations = 1; 746 } 747 748 sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); 749 if (!sg_list) { 750 iod->nr_allocations = -1; 751 return BLK_STS_RESOURCE; 752 } 753 754 nvme_pci_iod_list(req)[0] = sg_list; 755 iod->first_dma = sgl_dma; 756 757 nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); 758 759 do { 760 if (i == SGES_PER_PAGE) { 761 struct nvme_sgl_desc *old_sg_desc = sg_list; 762 struct nvme_sgl_desc *link = &old_sg_desc[i - 1]; 763 764 sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); 765 if (!sg_list) 766 goto free_sgls; 767 768 i = 0; 769 nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list; 770 sg_list[i++] = *link; 771 nvme_pci_sgl_set_seg(link, sgl_dma, entries); 772 } 773 774 nvme_pci_sgl_set_data(&sg_list[i++], sg); 775 sg = sg_next(sg); 776 } while (--entries > 0); 777 778 return BLK_STS_OK; 779 free_sgls: 780 nvme_free_sgls(dev, req); 781 return BLK_STS_RESOURCE; 782 } 783 784 static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, 785 struct request *req, struct nvme_rw_command *cmnd, 786 struct bio_vec *bv) 787 { 788 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 789 unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); 790 unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; 791 792 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 793 if (dma_mapping_error(dev->dev, iod->first_dma)) 794 return BLK_STS_RESOURCE; 795 iod->dma_len = bv->bv_len; 796 797 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); 798 if (bv->bv_len > first_prp_len) 799 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); 800 return BLK_STS_OK; 801 } 802 803 static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, 804 struct request *req, struct nvme_rw_command *cmnd, 805 struct bio_vec *bv) 806 { 807 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 808 809 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 810 if (dma_mapping_error(dev->dev, iod->first_dma)) 811 return BLK_STS_RESOURCE; 812 iod->dma_len = bv->bv_len; 813 814 cmnd->flags = NVME_CMD_SGL_METABUF; 815 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); 816 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); 817 cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; 818 return BLK_STS_OK; 819 } 820 821 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 822 struct nvme_command *cmnd) 823 { 824 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 825 blk_status_t ret = BLK_STS_RESOURCE; 826 int rc; 827 828 if (blk_rq_nr_phys_segments(req) == 1) { 829 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 830 struct bio_vec bv = req_bvec(req); 831 832 if (!is_pci_p2pdma_page(bv.bv_page)) { 833 if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) 834 return nvme_setup_prp_simple(dev, req, 835 &cmnd->rw, &bv); 836 837 if (nvmeq->qid && sgl_threshold && 838 nvme_ctrl_sgl_supported(&dev->ctrl)) 839 return nvme_setup_sgl_simple(dev, req, 840 
&cmnd->rw, &bv); 841 } 842 } 843 844 iod->dma_len = 0; 845 iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); 846 if (!iod->sgt.sgl) 847 return BLK_STS_RESOURCE; 848 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); 849 iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); 850 if (!iod->sgt.orig_nents) 851 goto out_free_sg; 852 853 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 854 DMA_ATTR_NO_WARN); 855 if (rc) { 856 if (rc == -EREMOTEIO) 857 ret = BLK_STS_TARGET; 858 goto out_free_sg; 859 } 860 861 iod->use_sgl = nvme_pci_use_sgls(dev, req); 862 if (iod->use_sgl) 863 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); 864 else 865 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); 866 if (ret != BLK_STS_OK) 867 goto out_unmap_sg; 868 return BLK_STS_OK; 869 870 out_unmap_sg: 871 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); 872 out_free_sg: 873 mempool_free(iod->sgt.sgl, dev->iod_mempool); 874 return ret; 875 } 876 877 static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, 878 struct nvme_command *cmnd) 879 { 880 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 881 882 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), 883 rq_dma_dir(req), 0); 884 if (dma_mapping_error(dev->dev, iod->meta_dma)) 885 return BLK_STS_IOERR; 886 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); 887 return BLK_STS_OK; 888 } 889 890 static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) 891 { 892 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 893 blk_status_t ret; 894 895 iod->aborted = false; 896 iod->nr_allocations = -1; 897 iod->sgt.nents = 0; 898 899 ret = nvme_setup_cmd(req->q->queuedata, req); 900 if (ret) 901 return ret; 902 903 if (blk_rq_nr_phys_segments(req)) { 904 ret = nvme_map_data(dev, req, &iod->cmd); 905 if (ret) 906 goto out_free_cmd; 907 } 908 909 if (blk_integrity_rq(req)) { 910 ret = nvme_map_metadata(dev, req, &iod->cmd); 911 if (ret) 912 goto out_unmap_data; 913 } 914 915 blk_mq_start_request(req); 916 return BLK_STS_OK; 917 out_unmap_data: 918 nvme_unmap_data(dev, req); 919 out_free_cmd: 920 nvme_cleanup_cmd(req); 921 return ret; 922 } 923 924 /* 925 * NOTE: ns is NULL when called on the admin queue. 926 */ 927 static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, 928 const struct blk_mq_queue_data *bd) 929 { 930 struct nvme_queue *nvmeq = hctx->driver_data; 931 struct nvme_dev *dev = nvmeq->dev; 932 struct request *req = bd->rq; 933 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 934 blk_status_t ret; 935 936 /* 937 * We should not need to do this, but we're still using this to 938 * ensure we can drain requests on a dying queue. 
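 *
 * NVMEQ_ENABLED is cleared by nvme_suspend_queue() under a full memory
 * barrier, so once a queue is being torn down new submissions observe the
 * cleared bit here and fail with BLK_STS_IOERR instead of being copied into
 * a dead submission queue.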
939 */ 940 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 941 return BLK_STS_IOERR; 942 943 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) 944 return nvme_fail_nonready_command(&dev->ctrl, req); 945 946 ret = nvme_prep_rq(dev, req); 947 if (unlikely(ret)) 948 return ret; 949 spin_lock(&nvmeq->sq_lock); 950 nvme_sq_copy_cmd(nvmeq, &iod->cmd); 951 nvme_write_sq_db(nvmeq, bd->last); 952 spin_unlock(&nvmeq->sq_lock); 953 return BLK_STS_OK; 954 } 955 956 static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist) 957 { 958 spin_lock(&nvmeq->sq_lock); 959 while (!rq_list_empty(*rqlist)) { 960 struct request *req = rq_list_pop(rqlist); 961 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 962 963 nvme_sq_copy_cmd(nvmeq, &iod->cmd); 964 } 965 nvme_write_sq_db(nvmeq, true); 966 spin_unlock(&nvmeq->sq_lock); 967 } 968 969 static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) 970 { 971 /* 972 * We should not need to do this, but we're still using this to 973 * ensure we can drain requests on a dying queue. 974 */ 975 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 976 return false; 977 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) 978 return false; 979 980 req->mq_hctx->tags->rqs[req->tag] = req; 981 return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; 982 } 983 984 static void nvme_queue_rqs(struct request **rqlist) 985 { 986 struct request *req, *next, *prev = NULL; 987 struct request *requeue_list = NULL; 988 989 rq_list_for_each_safe(rqlist, req, next) { 990 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 991 992 if (!nvme_prep_rq_batch(nvmeq, req)) { 993 /* detach 'req' and add to remainder list */ 994 rq_list_move(rqlist, &requeue_list, req, prev); 995 996 req = prev; 997 if (!req) 998 continue; 999 } 1000 1001 if (!next || req->mq_hctx != next->mq_hctx) { 1002 /* detach rest of list, and submit */ 1003 req->rq_next = NULL; 1004 nvme_submit_cmds(nvmeq, rqlist); 1005 *rqlist = next; 1006 prev = NULL; 1007 } else 1008 prev = req; 1009 } 1010 1011 *rqlist = requeue_list; 1012 } 1013 1014 static __always_inline void nvme_pci_unmap_rq(struct request *req) 1015 { 1016 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1017 struct nvme_dev *dev = nvmeq->dev; 1018 1019 if (blk_integrity_rq(req)) { 1020 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1021 1022 dma_unmap_page(dev->dev, iod->meta_dma, 1023 rq_integrity_vec(req)->bv_len, rq_data_dir(req)); 1024 } 1025 1026 if (blk_rq_nr_phys_segments(req)) 1027 nvme_unmap_data(dev, req); 1028 } 1029 1030 static void nvme_pci_complete_rq(struct request *req) 1031 { 1032 nvme_pci_unmap_rq(req); 1033 nvme_complete_rq(req); 1034 } 1035 1036 static void nvme_pci_complete_batch(struct io_comp_batch *iob) 1037 { 1038 nvme_complete_batch(iob, nvme_pci_unmap_rq); 1039 } 1040 1041 /* We read the CQE phase first to check if the rest of the entry is valid */ 1042 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) 1043 { 1044 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; 1045 1046 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; 1047 } 1048 1049 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) 1050 { 1051 u16 head = nvmeq->cq_head; 1052 1053 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, 1054 nvmeq->dbbuf_cq_ei)) 1055 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 1056 } 1057 1058 static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) 1059 { 1060 if (!nvmeq->qid) 1061 return 
nvmeq->dev->admin_tagset.tags[0]; 1062 return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; 1063 } 1064 1065 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, 1066 struct io_comp_batch *iob, u16 idx) 1067 { 1068 struct nvme_completion *cqe = &nvmeq->cqes[idx]; 1069 __u16 command_id = READ_ONCE(cqe->command_id); 1070 struct request *req; 1071 1072 /* 1073 * AEN requests are special as they don't time out and can 1074 * survive any kind of queue freeze and often don't respond to 1075 * aborts. We don't even bother to allocate a struct request 1076 * for them but rather special case them here. 1077 */ 1078 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { 1079 nvme_complete_async_event(&nvmeq->dev->ctrl, 1080 cqe->status, &cqe->result); 1081 return; 1082 } 1083 1084 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id); 1085 if (unlikely(!req)) { 1086 dev_warn(nvmeq->dev->ctrl.device, 1087 "invalid id %d completed on queue %d\n", 1088 command_id, le16_to_cpu(cqe->sq_id)); 1089 return; 1090 } 1091 1092 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); 1093 if (!nvme_try_complete_req(req, cqe->status, cqe->result) && 1094 !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, 1095 nvme_pci_complete_batch)) 1096 nvme_pci_complete_rq(req); 1097 } 1098 1099 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) 1100 { 1101 u32 tmp = nvmeq->cq_head + 1; 1102 1103 if (tmp == nvmeq->q_depth) { 1104 nvmeq->cq_head = 0; 1105 nvmeq->cq_phase ^= 1; 1106 } else { 1107 nvmeq->cq_head = tmp; 1108 } 1109 } 1110 1111 static inline int nvme_poll_cq(struct nvme_queue *nvmeq, 1112 struct io_comp_batch *iob) 1113 { 1114 int found = 0; 1115 1116 while (nvme_cqe_pending(nvmeq)) { 1117 found++; 1118 /* 1119 * load-load control dependency between phase and the rest of 1120 * the cqe requires a full read memory barrier 1121 */ 1122 dma_rmb(); 1123 nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); 1124 nvme_update_cq_head(nvmeq); 1125 } 1126 1127 if (found) 1128 nvme_ring_cq_doorbell(nvmeq); 1129 return found; 1130 } 1131 1132 static irqreturn_t nvme_irq(int irq, void *data) 1133 { 1134 struct nvme_queue *nvmeq = data; 1135 DEFINE_IO_COMP_BATCH(iob); 1136 1137 if (nvme_poll_cq(nvmeq, &iob)) { 1138 if (!rq_list_empty(iob.req_list)) 1139 nvme_pci_complete_batch(&iob); 1140 return IRQ_HANDLED; 1141 } 1142 return IRQ_NONE; 1143 } 1144 1145 static irqreturn_t nvme_irq_check(int irq, void *data) 1146 { 1147 struct nvme_queue *nvmeq = data; 1148 1149 if (nvme_cqe_pending(nvmeq)) 1150 return IRQ_WAKE_THREAD; 1151 return IRQ_NONE; 1152 } 1153 1154 /* 1155 * Poll for completions for any interrupt driven queue 1156 * Can be called from any context. 
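 *
 * The queue's interrupt line is masked for the duration of the poll, so this
 * cannot race with nvme_irq() reaping the same CQ. It must not be used for
 * NVMEQ_POLLED queues, which are not backed by an interrupt; hence the
 * WARN_ON_ONCE() below.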
1157 */ 1158 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 1159 { 1160 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1161 1162 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); 1163 1164 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1165 nvme_poll_cq(nvmeq, NULL); 1166 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1167 } 1168 1169 static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) 1170 { 1171 struct nvme_queue *nvmeq = hctx->driver_data; 1172 bool found; 1173 1174 if (!nvme_cqe_pending(nvmeq)) 1175 return 0; 1176 1177 spin_lock(&nvmeq->cq_poll_lock); 1178 found = nvme_poll_cq(nvmeq, iob); 1179 spin_unlock(&nvmeq->cq_poll_lock); 1180 1181 return found; 1182 } 1183 1184 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) 1185 { 1186 struct nvme_dev *dev = to_nvme_dev(ctrl); 1187 struct nvme_queue *nvmeq = &dev->queues[0]; 1188 struct nvme_command c = { }; 1189 1190 c.common.opcode = nvme_admin_async_event; 1191 c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; 1192 1193 spin_lock(&nvmeq->sq_lock); 1194 nvme_sq_copy_cmd(nvmeq, &c); 1195 nvme_write_sq_db(nvmeq, true); 1196 spin_unlock(&nvmeq->sq_lock); 1197 } 1198 1199 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 1200 { 1201 struct nvme_command c = { }; 1202 1203 c.delete_queue.opcode = opcode; 1204 c.delete_queue.qid = cpu_to_le16(id); 1205 1206 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 1207 } 1208 1209 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 1210 struct nvme_queue *nvmeq, s16 vector) 1211 { 1212 struct nvme_command c = { }; 1213 int flags = NVME_QUEUE_PHYS_CONTIG; 1214 1215 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) 1216 flags |= NVME_CQ_IRQ_ENABLED; 1217 1218 /* 1219 * Note: we (ab)use the fact that the prp fields survive if no data 1220 * is attached to the request. 1221 */ 1222 c.create_cq.opcode = nvme_admin_create_cq; 1223 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 1224 c.create_cq.cqid = cpu_to_le16(qid); 1225 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 1226 c.create_cq.cq_flags = cpu_to_le16(flags); 1227 c.create_cq.irq_vector = cpu_to_le16(vector); 1228 1229 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 1230 } 1231 1232 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 1233 struct nvme_queue *nvmeq) 1234 { 1235 struct nvme_ctrl *ctrl = &dev->ctrl; 1236 struct nvme_command c = { }; 1237 int flags = NVME_QUEUE_PHYS_CONTIG; 1238 1239 /* 1240 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't 1241 * set. Since URGENT priority is zeroes, it makes all queues 1242 * URGENT. 1243 */ 1244 if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) 1245 flags |= NVME_SQ_PRIO_MEDIUM; 1246 1247 /* 1248 * Note: we (ab)use the fact that the prp fields survive if no data 1249 * is attached to the request. 
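 * The Create I/O Submission Queue command carries no data payload, so prp1
 * below simply transports the host address of the queue itself.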
 */
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
	return RQ_END_IO_NONE;
}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
	/* If true, indicates loss of adapter communication, possibly by an
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (dev->ctrl.state) {
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;
	}

	/* We shouldn't reset unless the controller is in a fatal error state
	 * _or_ if we lost communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	return true;
}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);

	if (csts != ~0)
		return;

	dev_warn(dev->ctrl.device,
		 "Does your device have a faulty power saving mode enabled?\n");
	dev_warn(dev->ctrl.device,
		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
}

static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd = { };
	u32 csts = readl(dev->bar + NVME_REG_CSTS);

	/* If a PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	mb();
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;

	/*
	 * Reset immediately if the controller has failed.
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);
		return BLK_EH_DONE;
	}

	/*
	 * Did we miss an interrupt?
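	 * Poll the completion queue once more (with the IRQ masked for
	 * interrupt-driven queues). If the command has in fact completed we
	 * log it and return BLK_EH_DONE rather than escalating to an abort or
	 * controller reset.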
1361 */ 1362 if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) 1363 nvme_poll(req->mq_hctx, NULL); 1364 else 1365 nvme_poll_irqdisable(nvmeq); 1366 1367 if (blk_mq_request_completed(req)) { 1368 dev_warn(dev->ctrl.device, 1369 "I/O %d QID %d timeout, completion polled\n", 1370 req->tag, nvmeq->qid); 1371 return BLK_EH_DONE; 1372 } 1373 1374 /* 1375 * Shutdown immediately if controller times out while starting. The 1376 * reset work will see the pci device disabled when it gets the forced 1377 * cancellation error. All outstanding requests are completed on 1378 * shutdown, so we return BLK_EH_DONE. 1379 */ 1380 switch (dev->ctrl.state) { 1381 case NVME_CTRL_CONNECTING: 1382 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 1383 fallthrough; 1384 case NVME_CTRL_DELETING: 1385 dev_warn_ratelimited(dev->ctrl.device, 1386 "I/O %d QID %d timeout, disable controller\n", 1387 req->tag, nvmeq->qid); 1388 nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1389 nvme_dev_disable(dev, true); 1390 return BLK_EH_DONE; 1391 case NVME_CTRL_RESETTING: 1392 return BLK_EH_RESET_TIMER; 1393 default: 1394 break; 1395 } 1396 1397 /* 1398 * Shutdown the controller immediately and schedule a reset if the 1399 * command was already aborted once before and still hasn't been 1400 * returned to the driver, or if this is the admin queue. 1401 */ 1402 if (!nvmeq->qid || iod->aborted) { 1403 dev_warn(dev->ctrl.device, 1404 "I/O %d QID %d timeout, reset controller\n", 1405 req->tag, nvmeq->qid); 1406 nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1407 nvme_dev_disable(dev, false); 1408 nvme_reset_ctrl(&dev->ctrl); 1409 1410 return BLK_EH_DONE; 1411 } 1412 1413 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1414 atomic_inc(&dev->ctrl.abort_limit); 1415 return BLK_EH_RESET_TIMER; 1416 } 1417 iod->aborted = true; 1418 1419 cmd.abort.opcode = nvme_admin_abort_cmd; 1420 cmd.abort.cid = nvme_cid(req); 1421 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 1422 1423 dev_warn(nvmeq->dev->ctrl.device, 1424 "I/O %d (%s) QID %d timeout, aborting\n", 1425 req->tag, 1426 nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode), 1427 nvmeq->qid); 1428 1429 abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), 1430 BLK_MQ_REQ_NOWAIT); 1431 if (IS_ERR(abort_req)) { 1432 atomic_inc(&dev->ctrl.abort_limit); 1433 return BLK_EH_RESET_TIMER; 1434 } 1435 nvme_init_request(abort_req, &cmd); 1436 1437 abort_req->end_io = abort_endio; 1438 abort_req->end_io_data = NULL; 1439 blk_execute_rq_nowait(abort_req, false); 1440 1441 /* 1442 * The aborted req will be completed on receiving the abort req. 1443 * We enable the timer again. If hit twice, it'll cause a device reset, 1444 * as the device then is in a faulty state. 
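 * Only one abort is ever attempted per command (iod->aborted), and the number
 * of outstanding aborts is bounded by ctrl.abort_limit, so a controller that
 * ignores aborts eventually hits the reset path above.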
1445 */ 1446 return BLK_EH_RESET_TIMER; 1447 } 1448 1449 static void nvme_free_queue(struct nvme_queue *nvmeq) 1450 { 1451 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), 1452 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 1453 if (!nvmeq->sq_cmds) 1454 return; 1455 1456 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { 1457 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), 1458 nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1459 } else { 1460 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), 1461 nvmeq->sq_cmds, nvmeq->sq_dma_addr); 1462 } 1463 } 1464 1465 static void nvme_free_queues(struct nvme_dev *dev, int lowest) 1466 { 1467 int i; 1468 1469 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 1470 dev->ctrl.queue_count--; 1471 nvme_free_queue(&dev->queues[i]); 1472 } 1473 } 1474 1475 /** 1476 * nvme_suspend_queue - put queue into suspended state 1477 * @nvmeq: queue to suspend 1478 */ 1479 static int nvme_suspend_queue(struct nvme_queue *nvmeq) 1480 { 1481 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) 1482 return 1; 1483 1484 /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ 1485 mb(); 1486 1487 nvmeq->dev->online_queues--; 1488 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 1489 nvme_stop_admin_queue(&nvmeq->dev->ctrl); 1490 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) 1491 pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); 1492 return 0; 1493 } 1494 1495 static void nvme_suspend_io_queues(struct nvme_dev *dev) 1496 { 1497 int i; 1498 1499 for (i = dev->ctrl.queue_count - 1; i > 0; i--) 1500 nvme_suspend_queue(&dev->queues[i]); 1501 } 1502 1503 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 1504 { 1505 struct nvme_queue *nvmeq = &dev->queues[0]; 1506 1507 if (shutdown) 1508 nvme_shutdown_ctrl(&dev->ctrl); 1509 else 1510 nvme_disable_ctrl(&dev->ctrl); 1511 1512 nvme_poll_irqdisable(nvmeq); 1513 } 1514 1515 /* 1516 * Called only on a device that has been disabled and after all other threads 1517 * that can check this device's completion queues have synced, except 1518 * nvme_poll(). This is the last chance for the driver to see a natural 1519 * completion before nvme_cancel_request() terminates all incomplete requests. 
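 * Each queue's cq_poll_lock is taken around the final nvme_poll_cq() call so
 * that this reaping is serialized against nvme_poll(), which may still run
 * from polling contexts.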
1520 */ 1521 static void nvme_reap_pending_cqes(struct nvme_dev *dev) 1522 { 1523 int i; 1524 1525 for (i = dev->ctrl.queue_count - 1; i > 0; i--) { 1526 spin_lock(&dev->queues[i].cq_poll_lock); 1527 nvme_poll_cq(&dev->queues[i], NULL); 1528 spin_unlock(&dev->queues[i].cq_poll_lock); 1529 } 1530 } 1531 1532 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 1533 int entry_size) 1534 { 1535 int q_depth = dev->q_depth; 1536 unsigned q_size_aligned = roundup(q_depth * entry_size, 1537 NVME_CTRL_PAGE_SIZE); 1538 1539 if (q_size_aligned * nr_io_queues > dev->cmb_size) { 1540 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 1541 1542 mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); 1543 q_depth = div_u64(mem_per_q, entry_size); 1544 1545 /* 1546 * Ensure the reduced q_depth is above some threshold where it 1547 * would be better to map queues in system memory with the 1548 * original depth 1549 */ 1550 if (q_depth < 64) 1551 return -ENOMEM; 1552 } 1553 1554 return q_depth; 1555 } 1556 1557 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 1558 int qid) 1559 { 1560 struct pci_dev *pdev = to_pci_dev(dev->dev); 1561 1562 if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { 1563 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); 1564 if (nvmeq->sq_cmds) { 1565 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, 1566 nvmeq->sq_cmds); 1567 if (nvmeq->sq_dma_addr) { 1568 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); 1569 return 0; 1570 } 1571 1572 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1573 } 1574 } 1575 1576 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), 1577 &nvmeq->sq_dma_addr, GFP_KERNEL); 1578 if (!nvmeq->sq_cmds) 1579 return -ENOMEM; 1580 return 0; 1581 } 1582 1583 static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) 1584 { 1585 struct nvme_queue *nvmeq = &dev->queues[qid]; 1586 1587 if (dev->ctrl.queue_count > qid) 1588 return 0; 1589 1590 nvmeq->sqes = qid ? 
dev->io_sqes : NVME_ADM_SQES; 1591 nvmeq->q_depth = depth; 1592 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), 1593 &nvmeq->cq_dma_addr, GFP_KERNEL); 1594 if (!nvmeq->cqes) 1595 goto free_nvmeq; 1596 1597 if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) 1598 goto free_cqdma; 1599 1600 nvmeq->dev = dev; 1601 spin_lock_init(&nvmeq->sq_lock); 1602 spin_lock_init(&nvmeq->cq_poll_lock); 1603 nvmeq->cq_head = 0; 1604 nvmeq->cq_phase = 1; 1605 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1606 nvmeq->qid = qid; 1607 dev->ctrl.queue_count++; 1608 1609 return 0; 1610 1611 free_cqdma: 1612 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, 1613 nvmeq->cq_dma_addr); 1614 free_nvmeq: 1615 return -ENOMEM; 1616 } 1617 1618 static int queue_request_irq(struct nvme_queue *nvmeq) 1619 { 1620 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1621 int nr = nvmeq->dev->ctrl.instance; 1622 1623 if (use_threaded_interrupts) { 1624 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 1625 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 1626 } else { 1627 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 1628 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 1629 } 1630 } 1631 1632 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 1633 { 1634 struct nvme_dev *dev = nvmeq->dev; 1635 1636 nvmeq->sq_tail = 0; 1637 nvmeq->last_sq_tail = 0; 1638 nvmeq->cq_head = 0; 1639 nvmeq->cq_phase = 1; 1640 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1641 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); 1642 nvme_dbbuf_init(dev, nvmeq, qid); 1643 dev->online_queues++; 1644 wmb(); /* ensure the first interrupt sees the initialization */ 1645 } 1646 1647 /* 1648 * Try getting shutdown_lock while setting up IO queues. 1649 */ 1650 static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) 1651 { 1652 /* 1653 * Give up if the lock is being held by nvme_dev_disable. 1654 */ 1655 if (!mutex_trylock(&dev->shutdown_lock)) 1656 return -ENODEV; 1657 1658 /* 1659 * Controller is in wrong state, fail early. 1660 */ 1661 if (dev->ctrl.state != NVME_CTRL_CONNECTING) { 1662 mutex_unlock(&dev->shutdown_lock); 1663 return -ENODEV; 1664 } 1665 1666 return 0; 1667 } 1668 1669 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) 1670 { 1671 struct nvme_dev *dev = nvmeq->dev; 1672 int result; 1673 u16 vector = 0; 1674 1675 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 1676 1677 /* 1678 * A queue's vector matches the queue identifier unless the controller 1679 * has only one vector available. 1680 */ 1681 if (!polled) 1682 vector = dev->num_vecs == 1 ? 
0 : qid; 1683 else 1684 set_bit(NVMEQ_POLLED, &nvmeq->flags); 1685 1686 result = adapter_alloc_cq(dev, qid, nvmeq, vector); 1687 if (result) 1688 return result; 1689 1690 result = adapter_alloc_sq(dev, qid, nvmeq); 1691 if (result < 0) 1692 return result; 1693 if (result) 1694 goto release_cq; 1695 1696 nvmeq->cq_vector = vector; 1697 1698 result = nvme_setup_io_queues_trylock(dev); 1699 if (result) 1700 return result; 1701 nvme_init_queue(nvmeq, qid); 1702 if (!polled) { 1703 result = queue_request_irq(nvmeq); 1704 if (result < 0) 1705 goto release_sq; 1706 } 1707 1708 set_bit(NVMEQ_ENABLED, &nvmeq->flags); 1709 mutex_unlock(&dev->shutdown_lock); 1710 return result; 1711 1712 release_sq: 1713 dev->online_queues--; 1714 mutex_unlock(&dev->shutdown_lock); 1715 adapter_delete_sq(dev, qid); 1716 release_cq: 1717 adapter_delete_cq(dev, qid); 1718 return result; 1719 } 1720 1721 static const struct blk_mq_ops nvme_mq_admin_ops = { 1722 .queue_rq = nvme_queue_rq, 1723 .complete = nvme_pci_complete_rq, 1724 .init_hctx = nvme_admin_init_hctx, 1725 .init_request = nvme_pci_init_request, 1726 .timeout = nvme_timeout, 1727 }; 1728 1729 static const struct blk_mq_ops nvme_mq_ops = { 1730 .queue_rq = nvme_queue_rq, 1731 .queue_rqs = nvme_queue_rqs, 1732 .complete = nvme_pci_complete_rq, 1733 .commit_rqs = nvme_commit_rqs, 1734 .init_hctx = nvme_init_hctx, 1735 .init_request = nvme_pci_init_request, 1736 .map_queues = nvme_pci_map_queues, 1737 .timeout = nvme_timeout, 1738 .poll = nvme_poll, 1739 }; 1740 1741 static void nvme_dev_remove_admin(struct nvme_dev *dev) 1742 { 1743 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 1744 /* 1745 * If the controller was reset during removal, it's possible 1746 * user requests may be waiting on a stopped queue. Start the 1747 * queue to flush these to completion. 
1748 */ 1749 nvme_start_admin_queue(&dev->ctrl); 1750 blk_mq_destroy_queue(dev->ctrl.admin_q); 1751 blk_mq_free_tag_set(&dev->admin_tagset); 1752 } 1753 } 1754 1755 static int nvme_pci_alloc_admin_tag_set(struct nvme_dev *dev) 1756 { 1757 struct blk_mq_tag_set *set = &dev->admin_tagset; 1758 1759 set->ops = &nvme_mq_admin_ops; 1760 set->nr_hw_queues = 1; 1761 1762 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 1763 set->timeout = NVME_ADMIN_TIMEOUT; 1764 set->numa_node = dev->ctrl.numa_node; 1765 set->cmd_size = sizeof(struct nvme_iod); 1766 set->flags = BLK_MQ_F_NO_SCHED; 1767 set->driver_data = dev; 1768 1769 if (blk_mq_alloc_tag_set(set)) 1770 return -ENOMEM; 1771 dev->ctrl.admin_tagset = set; 1772 1773 dev->ctrl.admin_q = blk_mq_init_queue(set); 1774 if (IS_ERR(dev->ctrl.admin_q)) { 1775 blk_mq_free_tag_set(set); 1776 dev->ctrl.admin_q = NULL; 1777 return -ENOMEM; 1778 } 1779 if (!blk_get_queue(dev->ctrl.admin_q)) { 1780 nvme_dev_remove_admin(dev); 1781 dev->ctrl.admin_q = NULL; 1782 return -ENODEV; 1783 } 1784 return 0; 1785 } 1786 1787 static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 1788 { 1789 return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 1790 } 1791 1792 static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 1793 { 1794 struct pci_dev *pdev = to_pci_dev(dev->dev); 1795 1796 if (size <= dev->bar_mapped_size) 1797 return 0; 1798 if (size > pci_resource_len(pdev, 0)) 1799 return -ENOMEM; 1800 if (dev->bar) 1801 iounmap(dev->bar); 1802 dev->bar = ioremap(pci_resource_start(pdev, 0), size); 1803 if (!dev->bar) { 1804 dev->bar_mapped_size = 0; 1805 return -ENOMEM; 1806 } 1807 dev->bar_mapped_size = size; 1808 dev->dbs = dev->bar + NVME_REG_DBS; 1809 1810 return 0; 1811 } 1812 1813 static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 1814 { 1815 int result; 1816 u32 aqa; 1817 struct nvme_queue *nvmeq; 1818 1819 result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 1820 if (result < 0) 1821 return result; 1822 1823 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 
1824 NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 1825 1826 if (dev->subsystem && 1827 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 1828 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 1829 1830 result = nvme_disable_ctrl(&dev->ctrl); 1831 if (result < 0) 1832 return result; 1833 1834 result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1835 if (result) 1836 return result; 1837 1838 dev->ctrl.numa_node = dev_to_node(dev->dev); 1839 1840 nvmeq = &dev->queues[0]; 1841 aqa = nvmeq->q_depth - 1; 1842 aqa |= aqa << 16; 1843 1844 writel(aqa, dev->bar + NVME_REG_AQA); 1845 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 1846 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 1847 1848 result = nvme_enable_ctrl(&dev->ctrl); 1849 if (result) 1850 return result; 1851 1852 nvmeq->cq_vector = 0; 1853 nvme_init_queue(nvmeq, 0); 1854 result = queue_request_irq(nvmeq); 1855 if (result) { 1856 dev->online_queues--; 1857 return result; 1858 } 1859 1860 set_bit(NVMEQ_ENABLED, &nvmeq->flags); 1861 return result; 1862 } 1863 1864 static int nvme_create_io_queues(struct nvme_dev *dev) 1865 { 1866 unsigned i, max, rw_queues; 1867 int ret = 0; 1868 1869 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 1870 if (nvme_alloc_queue(dev, i, dev->q_depth)) { 1871 ret = -ENOMEM; 1872 break; 1873 } 1874 } 1875 1876 max = min(dev->max_qid, dev->ctrl.queue_count - 1); 1877 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { 1878 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + 1879 dev->io_queues[HCTX_TYPE_READ]; 1880 } else { 1881 rw_queues = max; 1882 } 1883 1884 for (i = dev->online_queues; i <= max; i++) { 1885 bool polled = i > rw_queues; 1886 1887 ret = nvme_create_queue(&dev->queues[i], i, polled); 1888 if (ret) 1889 break; 1890 } 1891 1892 /* 1893 * Ignore failing Create SQ/CQ commands, we can continue with less 1894 * than the desired amount of queues, and even a controller without 1895 * I/O queues can still be used to issue admin commands. This might 1896 * be useful to upgrade a buggy firmware for example. 1897 */ 1898 return ret >= 0 ? 
0 : ret; 1899 } 1900 1901 static u64 nvme_cmb_size_unit(struct nvme_dev *dev) 1902 { 1903 u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; 1904 1905 return 1ULL << (12 + 4 * szu); 1906 } 1907 1908 static u32 nvme_cmb_size(struct nvme_dev *dev) 1909 { 1910 return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; 1911 } 1912 1913 static void nvme_map_cmb(struct nvme_dev *dev) 1914 { 1915 u64 size, offset; 1916 resource_size_t bar_size; 1917 struct pci_dev *pdev = to_pci_dev(dev->dev); 1918 int bar; 1919 1920 if (dev->cmb_size) 1921 return; 1922 1923 if (NVME_CAP_CMBS(dev->ctrl.cap)) 1924 writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); 1925 1926 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1927 if (!dev->cmbsz) 1928 return; 1929 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 1930 1931 size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); 1932 offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); 1933 bar = NVME_CMB_BIR(dev->cmbloc); 1934 bar_size = pci_resource_len(pdev, bar); 1935 1936 if (offset > bar_size) 1937 return; 1938 1939 /* 1940 * Tell the controller about the host side address mapping the CMB, 1941 * and enable CMB decoding for the NVMe 1.4+ scheme: 1942 */ 1943 if (NVME_CAP_CMBS(dev->ctrl.cap)) { 1944 hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | 1945 (pci_bus_address(pdev, bar) + offset), 1946 dev->bar + NVME_REG_CMBMSC); 1947 } 1948 1949 /* 1950 * Controllers may support a CMB size larger than their BAR, 1951 * for example, due to being behind a bridge. Reduce the CMB to 1952 * the reported size of the BAR 1953 */ 1954 if (size > bar_size - offset) 1955 size = bar_size - offset; 1956 1957 if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 1958 dev_warn(dev->ctrl.device, 1959 "failed to register the CMB\n"); 1960 return; 1961 } 1962 1963 dev->cmb_size = size; 1964 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 1965 1966 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 1967 (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 1968 pci_p2pmem_publish(pdev, true); 1969 } 1970 1971 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 1972 { 1973 u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; 1974 u64 dma_addr = dev->host_mem_descs_dma; 1975 struct nvme_command c = { }; 1976 int ret; 1977 1978 c.features.opcode = nvme_admin_set_features; 1979 c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 1980 c.features.dword11 = cpu_to_le32(bits); 1981 c.features.dword12 = cpu_to_le32(host_mem_size); 1982 c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 1983 c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 1984 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 1985 1986 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 1987 if (ret) { 1988 dev_warn(dev->ctrl.device, 1989 "failed to set host mem (err %d, flags %#x).\n", 1990 ret, bits); 1991 } else 1992 dev->hmb = bits & NVME_HOST_MEM_ENABLE; 1993 1994 return ret; 1995 } 1996 1997 static void nvme_free_host_mem(struct nvme_dev *dev) 1998 { 1999 int i; 2000 2001 for (i = 0; i < dev->nr_host_mem_descs; i++) { 2002 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 2003 size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; 2004 2005 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 2006 le64_to_cpu(desc->addr), 2007 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 2008 } 2009 2010 kfree(dev->host_mem_desc_bufs); 2011 dev->host_mem_desc_bufs = NULL; 2012 dma_free_coherent(dev->dev, 2013 
dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), 2014 dev->host_mem_descs, dev->host_mem_descs_dma); 2015 dev->host_mem_descs = NULL; 2016 dev->nr_host_mem_descs = 0; 2017 } 2018 2019 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, 2020 u32 chunk_size) 2021 { 2022 struct nvme_host_mem_buf_desc *descs; 2023 u32 max_entries, len; 2024 dma_addr_t descs_dma; 2025 int i = 0; 2026 void **bufs; 2027 u64 size, tmp; 2028 2029 tmp = (preferred + chunk_size - 1); 2030 do_div(tmp, chunk_size); 2031 max_entries = tmp; 2032 2033 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 2034 max_entries = dev->ctrl.hmmaxd; 2035 2036 descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), 2037 &descs_dma, GFP_KERNEL); 2038 if (!descs) 2039 goto out; 2040 2041 bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 2042 if (!bufs) 2043 goto out_free_descs; 2044 2045 for (size = 0; size < preferred && i < max_entries; size += len) { 2046 dma_addr_t dma_addr; 2047 2048 len = min_t(u64, chunk_size, preferred - size); 2049 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 2050 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 2051 if (!bufs[i]) 2052 break; 2053 2054 descs[i].addr = cpu_to_le64(dma_addr); 2055 descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); 2056 i++; 2057 } 2058 2059 if (!size) 2060 goto out_free_bufs; 2061 2062 dev->nr_host_mem_descs = i; 2063 dev->host_mem_size = size; 2064 dev->host_mem_descs = descs; 2065 dev->host_mem_descs_dma = descs_dma; 2066 dev->host_mem_desc_bufs = bufs; 2067 return 0; 2068 2069 out_free_bufs: 2070 while (--i >= 0) { 2071 size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; 2072 2073 dma_free_attrs(dev->dev, size, bufs[i], 2074 le64_to_cpu(descs[i].addr), 2075 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 2076 } 2077 2078 kfree(bufs); 2079 out_free_descs: 2080 dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, 2081 descs_dma); 2082 out: 2083 dev->host_mem_descs = NULL; 2084 return -ENOMEM; 2085 } 2086 2087 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 2088 { 2089 u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 2090 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 2091 u64 chunk_size; 2092 2093 /* start big and work our way down */ 2094 for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { 2095 if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { 2096 if (!min || dev->host_mem_size >= min) 2097 return 0; 2098 nvme_free_host_mem(dev); 2099 } 2100 } 2101 2102 return -ENOMEM; 2103 } 2104 2105 static int nvme_setup_host_mem(struct nvme_dev *dev) 2106 { 2107 u64 max = (u64)max_host_mem_size_mb * SZ_1M; 2108 u64 preferred = (u64)dev->ctrl.hmpre * 4096; 2109 u64 min = (u64)dev->ctrl.hmmin * 4096; 2110 u32 enable_bits = NVME_HOST_MEM_ENABLE; 2111 int ret; 2112 2113 preferred = min(preferred, max); 2114 if (min > max) { 2115 dev_warn(dev->ctrl.device, 2116 "min host memory (%lld MiB) above limit (%d MiB).\n", 2117 min >> ilog2(SZ_1M), max_host_mem_size_mb); 2118 nvme_free_host_mem(dev); 2119 return 0; 2120 } 2121 2122 /* 2123 * If we already have a buffer allocated check if we can reuse it. 
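 * When the existing buffer is large enough, it is handed back to the
 * controller with the NVME_HOST_MEM_RETURN bit set below, which
 * indicates that the controller is being given back the buffer it was
 * already using rather than a brand new allocation.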
2124 */ 2125 if (dev->host_mem_descs) { 2126 if (dev->host_mem_size >= min) 2127 enable_bits |= NVME_HOST_MEM_RETURN; 2128 else 2129 nvme_free_host_mem(dev); 2130 } 2131 2132 if (!dev->host_mem_descs) { 2133 if (nvme_alloc_host_mem(dev, min, preferred)) { 2134 dev_warn(dev->ctrl.device, 2135 "failed to allocate host memory buffer.\n"); 2136 return 0; /* controller must work without HMB */ 2137 } 2138 2139 dev_info(dev->ctrl.device, 2140 "allocated %lld MiB host memory buffer.\n", 2141 dev->host_mem_size >> ilog2(SZ_1M)); 2142 } 2143 2144 ret = nvme_set_host_mem(dev, enable_bits); 2145 if (ret) 2146 nvme_free_host_mem(dev); 2147 return ret; 2148 } 2149 2150 static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, 2151 char *buf) 2152 { 2153 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2154 2155 return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", 2156 ndev->cmbloc, ndev->cmbsz); 2157 } 2158 static DEVICE_ATTR_RO(cmb); 2159 2160 static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, 2161 char *buf) 2162 { 2163 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2164 2165 return sysfs_emit(buf, "%u\n", ndev->cmbloc); 2166 } 2167 static DEVICE_ATTR_RO(cmbloc); 2168 2169 static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, 2170 char *buf) 2171 { 2172 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2173 2174 return sysfs_emit(buf, "%u\n", ndev->cmbsz); 2175 } 2176 static DEVICE_ATTR_RO(cmbsz); 2177 2178 static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, 2179 char *buf) 2180 { 2181 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2182 2183 return sysfs_emit(buf, "%d\n", ndev->hmb); 2184 } 2185 2186 static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, 2187 const char *buf, size_t count) 2188 { 2189 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2190 bool new; 2191 int ret; 2192 2193 if (strtobool(buf, &new) < 0) 2194 return -EINVAL; 2195 2196 if (new == ndev->hmb) 2197 return count; 2198 2199 if (new) { 2200 ret = nvme_setup_host_mem(ndev); 2201 } else { 2202 ret = nvme_set_host_mem(ndev, 0); 2203 if (!ret) 2204 nvme_free_host_mem(ndev); 2205 } 2206 2207 if (ret < 0) 2208 return ret; 2209 2210 return count; 2211 } 2212 static DEVICE_ATTR_RW(hmb); 2213 2214 static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj, 2215 struct attribute *a, int n) 2216 { 2217 struct nvme_ctrl *ctrl = 2218 dev_get_drvdata(container_of(kobj, struct device, kobj)); 2219 struct nvme_dev *dev = to_nvme_dev(ctrl); 2220 2221 if (a == &dev_attr_cmb.attr || 2222 a == &dev_attr_cmbloc.attr || 2223 a == &dev_attr_cmbsz.attr) { 2224 if (!dev->cmbsz) 2225 return 0; 2226 } 2227 if (a == &dev_attr_hmb.attr && !ctrl->hmpre) 2228 return 0; 2229 2230 return a->mode; 2231 } 2232 2233 static struct attribute *nvme_pci_attrs[] = { 2234 &dev_attr_cmb.attr, 2235 &dev_attr_cmbloc.attr, 2236 &dev_attr_cmbsz.attr, 2237 &dev_attr_hmb.attr, 2238 NULL, 2239 }; 2240 2241 static const struct attribute_group nvme_pci_attr_group = { 2242 .attrs = nvme_pci_attrs, 2243 .is_visible = nvme_pci_attrs_are_visible, 2244 }; 2245 2246 /* 2247 * nirqs is the number of interrupts available for write and read 2248 * queues. The core already reserved an interrupt for the admin queue. 
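 *
 * A purely illustrative example: with nrirqs == 8 and the write_queues
 * module parameter set to 3, nr_read_queues below ends up as 5, giving
 * affinity sets of 3 (default/write) interrupts and 5 (read) interrupts.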
2249 */ 2250 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) 2251 { 2252 struct nvme_dev *dev = affd->priv; 2253 unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; 2254 2255 /* 2256 * If there is no interrupt available for queues, ensure that 2257 * the default queue is set to 1. The affinity set size is 2258 * also set to one, but the irq core ignores it for this case. 2259 * 2260 * If only one interrupt is available or 'write_queue' == 0, combine 2261 * write and read queues. 2262 * 2263 * If 'write_queues' > 0, ensure it leaves room for at least one read 2264 * queue. 2265 */ 2266 if (!nrirqs) { 2267 nrirqs = 1; 2268 nr_read_queues = 0; 2269 } else if (nrirqs == 1 || !nr_write_queues) { 2270 nr_read_queues = 0; 2271 } else if (nr_write_queues >= nrirqs) { 2272 nr_read_queues = 1; 2273 } else { 2274 nr_read_queues = nrirqs - nr_write_queues; 2275 } 2276 2277 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2278 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2279 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; 2280 affd->set_size[HCTX_TYPE_READ] = nr_read_queues; 2281 affd->nr_sets = nr_read_queues ? 2 : 1; 2282 } 2283 2284 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 2285 { 2286 struct pci_dev *pdev = to_pci_dev(dev->dev); 2287 struct irq_affinity affd = { 2288 .pre_vectors = 1, 2289 .calc_sets = nvme_calc_irq_sets, 2290 .priv = dev, 2291 }; 2292 unsigned int irq_queues, poll_queues; 2293 2294 /* 2295 * Poll queues don't need interrupts, but we need at least one I/O queue 2296 * left over for non-polled I/O. 2297 */ 2298 poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); 2299 dev->io_queues[HCTX_TYPE_POLL] = poll_queues; 2300 2301 /* 2302 * Initialize for the single interrupt case, will be updated in 2303 * nvme_calc_irq_sets(). 2304 */ 2305 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2306 dev->io_queues[HCTX_TYPE_READ] = 0; 2307 2308 /* 2309 * We need interrupts for the admin queue and each non-polled I/O queue, 2310 * but some Apple controllers require all queues to use the first 2311 * vector. 2312 */ 2313 irq_queues = 1; 2314 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) 2315 irq_queues += (nr_io_queues - poll_queues); 2316 return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, 2317 PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); 2318 } 2319 2320 static void nvme_disable_io_queues(struct nvme_dev *dev) 2321 { 2322 if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 2323 __nvme_disable_io_queues(dev, nvme_admin_delete_cq); 2324 } 2325 2326 static unsigned int nvme_max_io_queues(struct nvme_dev *dev) 2327 { 2328 /* 2329 * If tags are shared with admin queue (Apple bug), then 2330 * make sure we only use one IO queue. 2331 */ 2332 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2333 return 1; 2334 return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; 2335 } 2336 2337 static int nvme_setup_io_queues(struct nvme_dev *dev) 2338 { 2339 struct nvme_queue *adminq = &dev->queues[0]; 2340 struct pci_dev *pdev = to_pci_dev(dev->dev); 2341 unsigned int nr_io_queues; 2342 unsigned long size; 2343 int result; 2344 2345 /* 2346 * Sample the module parameters once at reset time so that we have 2347 * stable values to work with. 
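 * (write_queues and poll_queues are runtime-writable module parameters,
 * so without this snapshot they could change underneath us while the
 * queues are being set up.)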
2348 */
2349 dev->nr_write_queues = write_queues;
2350 dev->nr_poll_queues = poll_queues;
2351
2352 nr_io_queues = dev->nr_allocated_queues - 1;
2353 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2354 if (result < 0)
2355 return result;
2356
2357 if (nr_io_queues == 0)
2358 return 0;
2359
2360 /*
2361 * Free IRQ resources as soon as the NVMEQ_ENABLED bit transitions
2362 * from set to unset; if there were a window before the IRQ is truly
2363 * freed, pci_free_irq_vectors() jumping into that window would crash.
2364 * Also take the lock to avoid racing with pci_free_irq_vectors() in
2365 * the nvme_dev_disable() path.
2366 */
2367 result = nvme_setup_io_queues_trylock(dev);
2368 if (result)
2369 return result;
2370 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2371 pci_free_irq(pdev, 0, adminq);
2372
2373 if (dev->cmb_use_sqes) {
2374 result = nvme_cmb_qdepth(dev, nr_io_queues,
2375 sizeof(struct nvme_command));
2376 if (result > 0)
2377 dev->q_depth = result;
2378 else
2379 dev->cmb_use_sqes = false;
2380 }
2381
2382 do {
2383 size = db_bar_size(dev, nr_io_queues);
2384 result = nvme_remap_bar(dev, size);
2385 if (!result)
2386 break;
2387 if (!--nr_io_queues) {
2388 result = -ENOMEM;
2389 goto out_unlock;
2390 }
2391 } while (1);
2392 adminq->q_db = dev->dbs;
2393
2394 retry:
2395 /* Deregister the admin queue's interrupt */
2396 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2397 pci_free_irq(pdev, 0, adminq);
2398
2399 /*
2400 * If we enabled MSI-X early because INTx is not supported, disable it
2401 * again before setting up the full range we need.
2402 */
2403 pci_free_irq_vectors(pdev);
2404
2405 result = nvme_setup_irqs(dev, nr_io_queues);
2406 if (result <= 0) {
2407 result = -EIO;
2408 goto out_unlock;
2409 }
2410
2411 dev->num_vecs = result;
2412 result = max(result - 1, 1);
2413 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
2414
2415 /*
2416 * Should investigate if there's a performance win from allocating
2417 * more queues than interrupt vectors; it might allow the submission
2418 * path to scale better, even if the receive path is limited by the
2419 * number of interrupts.
2420 */ 2421 result = queue_request_irq(adminq); 2422 if (result) 2423 goto out_unlock; 2424 set_bit(NVMEQ_ENABLED, &adminq->flags); 2425 mutex_unlock(&dev->shutdown_lock); 2426 2427 result = nvme_create_io_queues(dev); 2428 if (result || dev->online_queues < 2) 2429 return result; 2430 2431 if (dev->online_queues - 1 < dev->max_qid) { 2432 nr_io_queues = dev->online_queues - 1; 2433 nvme_disable_io_queues(dev); 2434 result = nvme_setup_io_queues_trylock(dev); 2435 if (result) 2436 return result; 2437 nvme_suspend_io_queues(dev); 2438 goto retry; 2439 } 2440 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 2441 dev->io_queues[HCTX_TYPE_DEFAULT], 2442 dev->io_queues[HCTX_TYPE_READ], 2443 dev->io_queues[HCTX_TYPE_POLL]); 2444 return 0; 2445 out_unlock: 2446 mutex_unlock(&dev->shutdown_lock); 2447 return result; 2448 } 2449 2450 static enum rq_end_io_ret nvme_del_queue_end(struct request *req, 2451 blk_status_t error) 2452 { 2453 struct nvme_queue *nvmeq = req->end_io_data; 2454 2455 blk_mq_free_request(req); 2456 complete(&nvmeq->delete_done); 2457 return RQ_END_IO_NONE; 2458 } 2459 2460 static enum rq_end_io_ret nvme_del_cq_end(struct request *req, 2461 blk_status_t error) 2462 { 2463 struct nvme_queue *nvmeq = req->end_io_data; 2464 2465 if (error) 2466 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2467 2468 return nvme_del_queue_end(req, error); 2469 } 2470 2471 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2472 { 2473 struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2474 struct request *req; 2475 struct nvme_command cmd = { }; 2476 2477 cmd.delete_queue.opcode = opcode; 2478 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2479 2480 req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); 2481 if (IS_ERR(req)) 2482 return PTR_ERR(req); 2483 nvme_init_request(req, &cmd); 2484 2485 if (opcode == nvme_admin_delete_cq) 2486 req->end_io = nvme_del_cq_end; 2487 else 2488 req->end_io = nvme_del_queue_end; 2489 req->end_io_data = nvmeq; 2490 2491 init_completion(&nvmeq->delete_done); 2492 blk_execute_rq_nowait(req, false); 2493 return 0; 2494 } 2495 2496 static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2497 { 2498 int nr_queues = dev->online_queues - 1, sent = 0; 2499 unsigned long timeout; 2500 2501 retry: 2502 timeout = NVME_ADMIN_TIMEOUT; 2503 while (nr_queues > 0) { 2504 if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2505 break; 2506 nr_queues--; 2507 sent++; 2508 } 2509 while (sent) { 2510 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2511 2512 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 2513 timeout); 2514 if (timeout == 0) 2515 return false; 2516 2517 sent--; 2518 if (nr_queues) 2519 goto retry; 2520 } 2521 return true; 2522 } 2523 2524 static void nvme_pci_alloc_tag_set(struct nvme_dev *dev) 2525 { 2526 struct blk_mq_tag_set * set = &dev->tagset; 2527 int ret; 2528 2529 set->ops = &nvme_mq_ops; 2530 set->nr_hw_queues = dev->online_queues - 1; 2531 set->nr_maps = 1; 2532 if (dev->io_queues[HCTX_TYPE_READ]) 2533 set->nr_maps = 2; 2534 if (dev->io_queues[HCTX_TYPE_POLL]) 2535 set->nr_maps = 3; 2536 set->timeout = NVME_IO_TIMEOUT; 2537 set->numa_node = dev->ctrl.numa_node; 2538 set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; 2539 set->cmd_size = sizeof(struct nvme_iod); 2540 set->flags = BLK_MQ_F_SHOULD_MERGE; 2541 set->driver_data = dev; 2542 2543 /* 2544 * Some Apple controllers requires tags to be unique 2545 * across admin and IO queue, so reserve the 
first 32 2546 * tags of the IO queue. 2547 */ 2548 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2549 set->reserved_tags = NVME_AQ_DEPTH; 2550 2551 ret = blk_mq_alloc_tag_set(set); 2552 if (ret) { 2553 dev_warn(dev->ctrl.device, 2554 "IO queues tagset allocation failed %d\n", ret); 2555 return; 2556 } 2557 dev->ctrl.tagset = set; 2558 } 2559 2560 static void nvme_pci_update_nr_queues(struct nvme_dev *dev) 2561 { 2562 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 2563 /* free previously allocated queues that are no longer usable */ 2564 nvme_free_queues(dev, dev->online_queues); 2565 } 2566 2567 static int nvme_pci_enable(struct nvme_dev *dev) 2568 { 2569 int result = -ENOMEM; 2570 struct pci_dev *pdev = to_pci_dev(dev->dev); 2571 int dma_address_bits = 64; 2572 2573 if (pci_enable_device_mem(pdev)) 2574 return result; 2575 2576 pci_set_master(pdev); 2577 2578 if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) 2579 dma_address_bits = 48; 2580 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits))) 2581 goto disable; 2582 2583 if (readl(dev->bar + NVME_REG_CSTS) == -1) { 2584 result = -ENODEV; 2585 goto disable; 2586 } 2587 2588 /* 2589 * Some devices and/or platforms don't advertise or work with INTx 2590 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2591 * adjust this later. 2592 */ 2593 result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2594 if (result < 0) 2595 return result; 2596 2597 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 2598 2599 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2600 io_queue_depth); 2601 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ 2602 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 2603 dev->dbs = dev->bar + 4096; 2604 2605 /* 2606 * Some Apple controllers require a non-standard SQE size. 2607 * Interestingly they also seem to ignore the CC:IOSQES register 2608 * so we don't bother updating it here. 2609 */ 2610 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) 2611 dev->io_sqes = 7; 2612 else 2613 dev->io_sqes = NVME_NVM_IOSQES; 2614 2615 /* 2616 * Temporary fix for the Apple controller found in the MacBook8,1 and 2617 * some MacBook7,1 to avoid controller resets and data loss. 
2618 */ 2619 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 2620 dev->q_depth = 2; 2621 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 2622 "set queue depth=%u to work around controller resets\n", 2623 dev->q_depth); 2624 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2625 (pdev->device == 0xa821 || pdev->device == 0xa822) && 2626 NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2627 dev->q_depth = 64; 2628 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2629 "set queue depth=%u\n", dev->q_depth); 2630 } 2631 2632 /* 2633 * Controllers with the shared tags quirk need the IO queue to be 2634 * big enough so that we get 32 tags for the admin queue 2635 */ 2636 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && 2637 (dev->q_depth < (NVME_AQ_DEPTH + 2))) { 2638 dev->q_depth = NVME_AQ_DEPTH + 2; 2639 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", 2640 dev->q_depth); 2641 } 2642 2643 2644 nvme_map_cmb(dev); 2645 2646 pci_enable_pcie_error_reporting(pdev); 2647 pci_save_state(pdev); 2648 return 0; 2649 2650 disable: 2651 pci_disable_device(pdev); 2652 return result; 2653 } 2654 2655 static void nvme_dev_unmap(struct nvme_dev *dev) 2656 { 2657 if (dev->bar) 2658 iounmap(dev->bar); 2659 pci_release_mem_regions(to_pci_dev(dev->dev)); 2660 } 2661 2662 static void nvme_pci_disable(struct nvme_dev *dev) 2663 { 2664 struct pci_dev *pdev = to_pci_dev(dev->dev); 2665 2666 pci_free_irq_vectors(pdev); 2667 2668 if (pci_is_enabled(pdev)) { 2669 pci_disable_pcie_error_reporting(pdev); 2670 pci_disable_device(pdev); 2671 } 2672 } 2673 2674 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 2675 { 2676 bool dead = true, freeze = false; 2677 struct pci_dev *pdev = to_pci_dev(dev->dev); 2678 2679 mutex_lock(&dev->shutdown_lock); 2680 if (pci_is_enabled(pdev)) { 2681 u32 csts; 2682 2683 if (pci_device_is_present(pdev)) 2684 csts = readl(dev->bar + NVME_REG_CSTS); 2685 else 2686 csts = ~0; 2687 2688 if (dev->ctrl.state == NVME_CTRL_LIVE || 2689 dev->ctrl.state == NVME_CTRL_RESETTING) { 2690 freeze = true; 2691 nvme_start_freeze(&dev->ctrl); 2692 } 2693 dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 2694 pdev->error_state != pci_channel_io_normal); 2695 } 2696 2697 /* 2698 * Give the controller a chance to complete all entered requests if 2699 * doing a safe shutdown. 2700 */ 2701 if (!dead && shutdown && freeze) 2702 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 2703 2704 nvme_stop_queues(&dev->ctrl); 2705 2706 if (!dead && dev->ctrl.queue_count > 0) { 2707 nvme_disable_io_queues(dev); 2708 nvme_disable_admin_queue(dev, shutdown); 2709 } 2710 nvme_suspend_io_queues(dev); 2711 nvme_suspend_queue(&dev->queues[0]); 2712 nvme_pci_disable(dev); 2713 nvme_reap_pending_cqes(dev); 2714 2715 nvme_cancel_tagset(&dev->ctrl); 2716 nvme_cancel_admin_tagset(&dev->ctrl); 2717 2718 /* 2719 * The driver will not be starting up queues again if shutting down so 2720 * must flush all entered requests to their failed completion to avoid 2721 * deadlocking blk-mq hot-cpu notifier. 
2722 */ 2723 if (shutdown) { 2724 nvme_start_queues(&dev->ctrl); 2725 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) 2726 nvme_start_admin_queue(&dev->ctrl); 2727 } 2728 mutex_unlock(&dev->shutdown_lock); 2729 } 2730 2731 static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) 2732 { 2733 if (!nvme_wait_reset(&dev->ctrl)) 2734 return -EBUSY; 2735 nvme_dev_disable(dev, shutdown); 2736 return 0; 2737 } 2738 2739 static int nvme_setup_prp_pools(struct nvme_dev *dev) 2740 { 2741 dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 2742 NVME_CTRL_PAGE_SIZE, 2743 NVME_CTRL_PAGE_SIZE, 0); 2744 if (!dev->prp_page_pool) 2745 return -ENOMEM; 2746 2747 /* Optimisation for I/Os between 4k and 128k */ 2748 dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 2749 256, 256, 0); 2750 if (!dev->prp_small_pool) { 2751 dma_pool_destroy(dev->prp_page_pool); 2752 return -ENOMEM; 2753 } 2754 return 0; 2755 } 2756 2757 static void nvme_release_prp_pools(struct nvme_dev *dev) 2758 { 2759 dma_pool_destroy(dev->prp_page_pool); 2760 dma_pool_destroy(dev->prp_small_pool); 2761 } 2762 2763 static void nvme_free_tagset(struct nvme_dev *dev) 2764 { 2765 if (dev->tagset.tags) 2766 blk_mq_free_tag_set(&dev->tagset); 2767 dev->ctrl.tagset = NULL; 2768 } 2769 2770 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 2771 { 2772 struct nvme_dev *dev = to_nvme_dev(ctrl); 2773 2774 nvme_dbbuf_dma_free(dev); 2775 nvme_free_tagset(dev); 2776 if (dev->ctrl.admin_q) 2777 blk_put_queue(dev->ctrl.admin_q); 2778 free_opal_dev(dev->ctrl.opal_dev); 2779 mempool_destroy(dev->iod_mempool); 2780 put_device(dev->dev); 2781 kfree(dev->queues); 2782 kfree(dev); 2783 } 2784 2785 static void nvme_remove_dead_ctrl(struct nvme_dev *dev) 2786 { 2787 /* 2788 * Set state to deleting now to avoid blocking nvme_wait_reset(), which 2789 * may be holding this pci_dev's device lock. 2790 */ 2791 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2792 nvme_get_ctrl(&dev->ctrl); 2793 nvme_dev_disable(dev, false); 2794 nvme_kill_queues(&dev->ctrl); 2795 if (!queue_work(nvme_wq, &dev->remove_work)) 2796 nvme_put_ctrl(&dev->ctrl); 2797 } 2798 2799 static void nvme_reset_work(struct work_struct *work) 2800 { 2801 struct nvme_dev *dev = 2802 container_of(work, struct nvme_dev, ctrl.reset_work); 2803 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2804 int result; 2805 2806 if (dev->ctrl.state != NVME_CTRL_RESETTING) { 2807 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", 2808 dev->ctrl.state); 2809 result = -ENODEV; 2810 goto out; 2811 } 2812 2813 /* 2814 * If we're called to reset a live controller first shut it down before 2815 * moving on. 2816 */ 2817 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2818 nvme_dev_disable(dev, false); 2819 nvme_sync_queues(&dev->ctrl); 2820 2821 mutex_lock(&dev->shutdown_lock); 2822 result = nvme_pci_enable(dev); 2823 if (result) 2824 goto out_unlock; 2825 2826 result = nvme_pci_configure_admin_queue(dev); 2827 if (result) 2828 goto out_unlock; 2829 2830 if (!dev->ctrl.admin_q) { 2831 result = nvme_pci_alloc_admin_tag_set(dev); 2832 if (result) 2833 goto out_unlock; 2834 } else { 2835 nvme_start_admin_queue(&dev->ctrl); 2836 } 2837 2838 dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1); 2839 2840 /* 2841 * Limit the max command size to prevent iod->sg allocations going 2842 * over a single page. 
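 * (NVME_MAX_KB_SZ is expressed in KiB, so shifting it left by one
 * converts it to 512-byte sectors; dma_max_mapping_size() is likewise
 * converted from bytes to sectors by the ">> 9".)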
2843 */ 2844 dev->ctrl.max_hw_sectors = min_t(u32, 2845 NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9); 2846 dev->ctrl.max_segments = NVME_MAX_SEGS; 2847 2848 /* 2849 * Don't limit the IOMMU merged segment size. 2850 */ 2851 dma_set_max_seg_size(dev->dev, 0xffffffff); 2852 2853 mutex_unlock(&dev->shutdown_lock); 2854 2855 /* 2856 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 2857 * initializing procedure here. 2858 */ 2859 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 2860 dev_warn(dev->ctrl.device, 2861 "failed to mark controller CONNECTING\n"); 2862 result = -EBUSY; 2863 goto out; 2864 } 2865 2866 /* 2867 * We do not support an SGL for metadata (yet), so we are limited to a 2868 * single integrity segment for the separate metadata pointer. 2869 */ 2870 dev->ctrl.max_integrity_segments = 1; 2871 2872 result = nvme_init_ctrl_finish(&dev->ctrl); 2873 if (result) 2874 goto out; 2875 2876 if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { 2877 if (!dev->ctrl.opal_dev) 2878 dev->ctrl.opal_dev = 2879 init_opal_dev(&dev->ctrl, &nvme_sec_submit); 2880 else if (was_suspend) 2881 opal_unlock_from_suspend(dev->ctrl.opal_dev); 2882 } else { 2883 free_opal_dev(dev->ctrl.opal_dev); 2884 dev->ctrl.opal_dev = NULL; 2885 } 2886 2887 if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { 2888 result = nvme_dbbuf_dma_alloc(dev); 2889 if (result) 2890 dev_warn(dev->dev, 2891 "unable to allocate dma for dbbuf\n"); 2892 } 2893 2894 if (dev->ctrl.hmpre) { 2895 result = nvme_setup_host_mem(dev); 2896 if (result < 0) 2897 goto out; 2898 } 2899 2900 result = nvme_setup_io_queues(dev); 2901 if (result) 2902 goto out; 2903 2904 /* 2905 * Keep the controller around but remove all namespaces if we don't have 2906 * any working I/O queue. 2907 */ 2908 if (dev->online_queues < 2) { 2909 dev_warn(dev->ctrl.device, "IO queues not created\n"); 2910 nvme_kill_queues(&dev->ctrl); 2911 nvme_remove_namespaces(&dev->ctrl); 2912 nvme_free_tagset(dev); 2913 } else { 2914 nvme_start_queues(&dev->ctrl); 2915 nvme_wait_freeze(&dev->ctrl); 2916 if (!dev->ctrl.tagset) 2917 nvme_pci_alloc_tag_set(dev); 2918 else 2919 nvme_pci_update_nr_queues(dev); 2920 nvme_dbbuf_set(dev); 2921 nvme_unfreeze(&dev->ctrl); 2922 } 2923 2924 /* 2925 * If only admin queue live, keep it to do further investigation or 2926 * recovery. 
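 * (For example, the admin queue can still be used to pull logs or to
 * update firmware, as noted in nvme_create_io_queues().)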
2927 */ 2928 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 2929 dev_warn(dev->ctrl.device, 2930 "failed to mark controller live state\n"); 2931 result = -ENODEV; 2932 goto out; 2933 } 2934 2935 if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj, 2936 &nvme_pci_attr_group)) 2937 dev->attrs_added = true; 2938 2939 nvme_start_ctrl(&dev->ctrl); 2940 return; 2941 2942 out_unlock: 2943 mutex_unlock(&dev->shutdown_lock); 2944 out: 2945 if (result) 2946 dev_warn(dev->ctrl.device, 2947 "Removing after probe failure status: %d\n", result); 2948 nvme_remove_dead_ctrl(dev); 2949 } 2950 2951 static void nvme_remove_dead_ctrl_work(struct work_struct *work) 2952 { 2953 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 2954 struct pci_dev *pdev = to_pci_dev(dev->dev); 2955 2956 if (pci_get_drvdata(pdev)) 2957 device_release_driver(&pdev->dev); 2958 nvme_put_ctrl(&dev->ctrl); 2959 } 2960 2961 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 2962 { 2963 *val = readl(to_nvme_dev(ctrl)->bar + off); 2964 return 0; 2965 } 2966 2967 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 2968 { 2969 writel(val, to_nvme_dev(ctrl)->bar + off); 2970 return 0; 2971 } 2972 2973 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 2974 { 2975 *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); 2976 return 0; 2977 } 2978 2979 static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 2980 { 2981 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 2982 2983 return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); 2984 } 2985 2986 static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) 2987 { 2988 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 2989 struct nvme_subsystem *subsys = ctrl->subsys; 2990 2991 dev_err(ctrl->device, 2992 "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", 2993 pdev->vendor, pdev->device, 2994 nvme_strlen(subsys->model, sizeof(subsys->model)), 2995 subsys->model, nvme_strlen(subsys->firmware_rev, 2996 sizeof(subsys->firmware_rev)), 2997 subsys->firmware_rev); 2998 } 2999 3000 static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) 3001 { 3002 struct nvme_dev *dev = to_nvme_dev(ctrl); 3003 3004 return dma_pci_p2pdma_supported(dev->dev); 3005 } 3006 3007 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 3008 .name = "pcie", 3009 .module = THIS_MODULE, 3010 .flags = NVME_F_METADATA_SUPPORTED, 3011 .reg_read32 = nvme_pci_reg_read32, 3012 .reg_write32 = nvme_pci_reg_write32, 3013 .reg_read64 = nvme_pci_reg_read64, 3014 .free_ctrl = nvme_pci_free_ctrl, 3015 .submit_async_event = nvme_pci_submit_async_event, 3016 .get_address = nvme_pci_get_address, 3017 .print_device_info = nvme_pci_print_device_info, 3018 .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, 3019 }; 3020 3021 static int nvme_dev_map(struct nvme_dev *dev) 3022 { 3023 struct pci_dev *pdev = to_pci_dev(dev->dev); 3024 3025 if (pci_request_mem_regions(pdev, "nvme")) 3026 return -ENODEV; 3027 3028 if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 3029 goto release; 3030 3031 return 0; 3032 release: 3033 pci_release_mem_regions(pdev); 3034 return -ENODEV; 3035 } 3036 3037 static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) 3038 { 3039 if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 3040 /* 3041 * Several Samsung devices seem to drop off the PCIe bus 3042 * randomly when APST is on and uses the deepest sleep state. 
3043 * This has been observed on a Samsung "SM951 NVMe SAMSUNG 3044 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 3045 * 950 PRO 256GB", but it seems to be restricted to two Dell 3046 * laptops. 3047 */ 3048 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 3049 (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 3050 dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 3051 return NVME_QUIRK_NO_DEEPEST_PS; 3052 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { 3053 /* 3054 * Samsung SSD 960 EVO drops off the PCIe bus after system 3055 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as 3056 * within few minutes after bootup on a Coffee Lake board - 3057 * ASUS PRIME Z370-A 3058 */ 3059 if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && 3060 (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || 3061 dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) 3062 return NVME_QUIRK_NO_APST; 3063 } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || 3064 pdev->device == 0xa808 || pdev->device == 0xa809)) || 3065 (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { 3066 /* 3067 * Forcing to use host managed nvme power settings for 3068 * lowest idle power with quick resume latency on 3069 * Samsung and Toshiba SSDs based on suspend behavior 3070 * on Coffee Lake board for LENOVO C640 3071 */ 3072 if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && 3073 dmi_match(DMI_BOARD_NAME, "LNVNB161216")) 3074 return NVME_QUIRK_SIMPLE_SUSPEND; 3075 } 3076 3077 return 0; 3078 } 3079 3080 static void nvme_async_probe(void *data, async_cookie_t cookie) 3081 { 3082 struct nvme_dev *dev = data; 3083 3084 flush_work(&dev->ctrl.reset_work); 3085 flush_work(&dev->ctrl.scan_work); 3086 nvme_put_ctrl(&dev->ctrl); 3087 } 3088 3089 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3090 { 3091 int node, result = -ENOMEM; 3092 struct nvme_dev *dev; 3093 unsigned long quirks = id->driver_data; 3094 size_t alloc_size; 3095 3096 node = dev_to_node(&pdev->dev); 3097 if (node == NUMA_NO_NODE) 3098 set_dev_node(&pdev->dev, first_memory_node); 3099 3100 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 3101 if (!dev) 3102 return -ENOMEM; 3103 3104 dev->nr_write_queues = write_queues; 3105 dev->nr_poll_queues = poll_queues; 3106 dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; 3107 dev->queues = kcalloc_node(dev->nr_allocated_queues, 3108 sizeof(struct nvme_queue), GFP_KERNEL, node); 3109 if (!dev->queues) 3110 goto free; 3111 3112 dev->dev = get_device(&pdev->dev); 3113 pci_set_drvdata(pdev, dev); 3114 3115 result = nvme_dev_map(dev); 3116 if (result) 3117 goto put_pci; 3118 3119 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 3120 INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 3121 mutex_init(&dev->shutdown_lock); 3122 3123 result = nvme_setup_prp_pools(dev); 3124 if (result) 3125 goto unmap; 3126 3127 quirks |= check_vendor_combination_bug(pdev); 3128 3129 if (!noacpi && acpi_storage_d3(&pdev->dev)) { 3130 /* 3131 * Some systems use a bios work around to ask for D3 on 3132 * platforms that support kernel managed suspend. 3133 */ 3134 dev_info(&pdev->dev, 3135 "platform quirk: setting simple suspend\n"); 3136 quirks |= NVME_QUIRK_SIMPLE_SUSPEND; 3137 } 3138 3139 /* 3140 * Double check that our mempool alloc size will cover the biggest 3141 * command we support. 
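 * (The mempool below reserves a single element of that size, so an IOD
 * can still be built for a command even under memory pressure.)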
3142 */ 3143 alloc_size = nvme_pci_iod_alloc_size(); 3144 WARN_ON_ONCE(alloc_size > PAGE_SIZE); 3145 3146 dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, 3147 mempool_kfree, 3148 (void *) alloc_size, 3149 GFP_KERNEL, node); 3150 if (!dev->iod_mempool) { 3151 result = -ENOMEM; 3152 goto release_pools; 3153 } 3154 3155 result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 3156 quirks); 3157 if (result) 3158 goto release_mempool; 3159 3160 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 3161 3162 nvme_reset_ctrl(&dev->ctrl); 3163 async_schedule(nvme_async_probe, dev); 3164 3165 return 0; 3166 3167 release_mempool: 3168 mempool_destroy(dev->iod_mempool); 3169 release_pools: 3170 nvme_release_prp_pools(dev); 3171 unmap: 3172 nvme_dev_unmap(dev); 3173 put_pci: 3174 put_device(dev->dev); 3175 free: 3176 kfree(dev->queues); 3177 kfree(dev); 3178 return result; 3179 } 3180 3181 static void nvme_reset_prepare(struct pci_dev *pdev) 3182 { 3183 struct nvme_dev *dev = pci_get_drvdata(pdev); 3184 3185 /* 3186 * We don't need to check the return value from waiting for the reset 3187 * state as pci_dev device lock is held, making it impossible to race 3188 * with ->remove(). 3189 */ 3190 nvme_disable_prepare_reset(dev, false); 3191 nvme_sync_queues(&dev->ctrl); 3192 } 3193 3194 static void nvme_reset_done(struct pci_dev *pdev) 3195 { 3196 struct nvme_dev *dev = pci_get_drvdata(pdev); 3197 3198 if (!nvme_try_sched_reset(&dev->ctrl)) 3199 flush_work(&dev->ctrl.reset_work); 3200 } 3201 3202 static void nvme_shutdown(struct pci_dev *pdev) 3203 { 3204 struct nvme_dev *dev = pci_get_drvdata(pdev); 3205 3206 nvme_disable_prepare_reset(dev, true); 3207 } 3208 3209 static void nvme_remove_attrs(struct nvme_dev *dev) 3210 { 3211 if (dev->attrs_added) 3212 sysfs_remove_group(&dev->ctrl.device->kobj, 3213 &nvme_pci_attr_group); 3214 } 3215 3216 /* 3217 * The driver's remove may be called on a device in a partially initialized 3218 * state. This function must not have any dependencies on the device state in 3219 * order to proceed. 
3220 */ 3221 static void nvme_remove(struct pci_dev *pdev) 3222 { 3223 struct nvme_dev *dev = pci_get_drvdata(pdev); 3224 3225 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 3226 pci_set_drvdata(pdev, NULL); 3227 3228 if (!pci_device_is_present(pdev)) { 3229 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 3230 nvme_dev_disable(dev, true); 3231 } 3232 3233 flush_work(&dev->ctrl.reset_work); 3234 nvme_stop_ctrl(&dev->ctrl); 3235 nvme_remove_namespaces(&dev->ctrl); 3236 nvme_dev_disable(dev, true); 3237 nvme_remove_attrs(dev); 3238 nvme_free_host_mem(dev); 3239 nvme_dev_remove_admin(dev); 3240 nvme_free_queues(dev, 0); 3241 nvme_release_prp_pools(dev); 3242 nvme_dev_unmap(dev); 3243 nvme_uninit_ctrl(&dev->ctrl); 3244 } 3245 3246 #ifdef CONFIG_PM_SLEEP 3247 static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) 3248 { 3249 return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); 3250 } 3251 3252 static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) 3253 { 3254 return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); 3255 } 3256 3257 static int nvme_resume(struct device *dev) 3258 { 3259 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3260 struct nvme_ctrl *ctrl = &ndev->ctrl; 3261 3262 if (ndev->last_ps == U32_MAX || 3263 nvme_set_power_state(ctrl, ndev->last_ps) != 0) 3264 goto reset; 3265 if (ctrl->hmpre && nvme_setup_host_mem(ndev)) 3266 goto reset; 3267 3268 return 0; 3269 reset: 3270 return nvme_try_sched_reset(ctrl); 3271 } 3272 3273 static int nvme_suspend(struct device *dev) 3274 { 3275 struct pci_dev *pdev = to_pci_dev(dev); 3276 struct nvme_dev *ndev = pci_get_drvdata(pdev); 3277 struct nvme_ctrl *ctrl = &ndev->ctrl; 3278 int ret = -EBUSY; 3279 3280 ndev->last_ps = U32_MAX; 3281 3282 /* 3283 * The platform does not remove power for a kernel managed suspend so 3284 * use host managed nvme power settings for lowest idle power if 3285 * possible. This should have quicker resume latency than a full device 3286 * shutdown. But if the firmware is involved after the suspend or the 3287 * device does not support any non-default power states, shut down the 3288 * device fully. 3289 * 3290 * If ASPM is not enabled for the device, shut down the device and allow 3291 * the PCI bus layer to put it into D3 in order to take the PCIe link 3292 * down, so as to allow the platform to achieve its minimum low-power 3293 * state (which may not be possible if the link is up). 3294 */ 3295 if (pm_suspend_via_firmware() || !ctrl->npss || 3296 !pcie_aspm_enabled(pdev) || 3297 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) 3298 return nvme_disable_prepare_reset(ndev, true); 3299 3300 nvme_start_freeze(ctrl); 3301 nvme_wait_freeze(ctrl); 3302 nvme_sync_queues(ctrl); 3303 3304 if (ctrl->state != NVME_CTRL_LIVE) 3305 goto unfreeze; 3306 3307 /* 3308 * Host memory access may not be successful in a system suspend state, 3309 * but the specification allows the controller to access memory in a 3310 * non-operational power state. 3311 */ 3312 if (ndev->hmb) { 3313 ret = nvme_set_host_mem(ndev, 0); 3314 if (ret < 0) 3315 goto unfreeze; 3316 } 3317 3318 ret = nvme_get_power_state(ctrl, &ndev->last_ps); 3319 if (ret < 0) 3320 goto unfreeze; 3321 3322 /* 3323 * A saved state prevents pci pm from generically controlling the 3324 * device's power. If we're using protocol specific settings, we don't 3325 * want pci interfering. 
3326 */ 3327 pci_save_state(pdev); 3328 3329 ret = nvme_set_power_state(ctrl, ctrl->npss); 3330 if (ret < 0) 3331 goto unfreeze; 3332 3333 if (ret) { 3334 /* discard the saved state */ 3335 pci_load_saved_state(pdev, NULL); 3336 3337 /* 3338 * Clearing npss forces a controller reset on resume. The 3339 * correct value will be rediscovered then. 3340 */ 3341 ret = nvme_disable_prepare_reset(ndev, true); 3342 ctrl->npss = 0; 3343 } 3344 unfreeze: 3345 nvme_unfreeze(ctrl); 3346 return ret; 3347 } 3348 3349 static int nvme_simple_suspend(struct device *dev) 3350 { 3351 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3352 3353 return nvme_disable_prepare_reset(ndev, true); 3354 } 3355 3356 static int nvme_simple_resume(struct device *dev) 3357 { 3358 struct pci_dev *pdev = to_pci_dev(dev); 3359 struct nvme_dev *ndev = pci_get_drvdata(pdev); 3360 3361 return nvme_try_sched_reset(&ndev->ctrl); 3362 } 3363 3364 static const struct dev_pm_ops nvme_dev_pm_ops = { 3365 .suspend = nvme_suspend, 3366 .resume = nvme_resume, 3367 .freeze = nvme_simple_suspend, 3368 .thaw = nvme_simple_resume, 3369 .poweroff = nvme_simple_suspend, 3370 .restore = nvme_simple_resume, 3371 }; 3372 #endif /* CONFIG_PM_SLEEP */ 3373 3374 static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 3375 pci_channel_state_t state) 3376 { 3377 struct nvme_dev *dev = pci_get_drvdata(pdev); 3378 3379 /* 3380 * A frozen channel requires a reset. When detected, this method will 3381 * shutdown the controller to quiesce. The controller will be restarted 3382 * after the slot reset through driver's slot_reset callback. 3383 */ 3384 switch (state) { 3385 case pci_channel_io_normal: 3386 return PCI_ERS_RESULT_CAN_RECOVER; 3387 case pci_channel_io_frozen: 3388 dev_warn(dev->ctrl.device, 3389 "frozen state error detected, reset controller\n"); 3390 nvme_dev_disable(dev, false); 3391 return PCI_ERS_RESULT_NEED_RESET; 3392 case pci_channel_io_perm_failure: 3393 dev_warn(dev->ctrl.device, 3394 "failure state error detected, request disconnect\n"); 3395 return PCI_ERS_RESULT_DISCONNECT; 3396 } 3397 return PCI_ERS_RESULT_NEED_RESET; 3398 } 3399 3400 static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 3401 { 3402 struct nvme_dev *dev = pci_get_drvdata(pdev); 3403 3404 dev_info(dev->ctrl.device, "restart after slot reset\n"); 3405 pci_restore_state(pdev); 3406 nvme_reset_ctrl(&dev->ctrl); 3407 return PCI_ERS_RESULT_RECOVERED; 3408 } 3409 3410 static void nvme_error_resume(struct pci_dev *pdev) 3411 { 3412 struct nvme_dev *dev = pci_get_drvdata(pdev); 3413 3414 flush_work(&dev->ctrl.reset_work); 3415 } 3416 3417 static const struct pci_error_handlers nvme_err_handler = { 3418 .error_detected = nvme_error_detected, 3419 .slot_reset = nvme_slot_reset, 3420 .resume = nvme_error_resume, 3421 .reset_prepare = nvme_reset_prepare, 3422 .reset_done = nvme_reset_done, 3423 }; 3424 3425 static const struct pci_device_id nvme_id_table[] = { 3426 { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ 3427 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3428 NVME_QUIRK_DEALLOCATE_ZEROES, }, 3429 { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ 3430 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3431 NVME_QUIRK_DEALLOCATE_ZEROES, }, 3432 { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ 3433 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3434 NVME_QUIRK_DEALLOCATE_ZEROES | 3435 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3436 { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ 3437 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3438 
NVME_QUIRK_DEALLOCATE_ZEROES, }, 3439 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 3440 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3441 NVME_QUIRK_MEDIUM_PRIO_SQ | 3442 NVME_QUIRK_NO_TEMP_THRESH_CHANGE | 3443 NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3444 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 3445 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3446 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 3447 .driver_data = NVME_QUIRK_IDENTIFY_CNS | 3448 NVME_QUIRK_DISABLE_WRITE_ZEROES | 3449 NVME_QUIRK_BOGUS_NID, }, 3450 { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ 3451 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3452 { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ 3453 .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | 3454 NVME_QUIRK_BOGUS_NID, }, 3455 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 3456 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 3457 NVME_QUIRK_NO_NS_DESC_LIST, }, 3458 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 3459 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3460 { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ 3461 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3462 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 3463 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3464 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ 3465 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3466 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ 3467 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 3468 NVME_QUIRK_DISABLE_WRITE_ZEROES| 3469 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3470 { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ 3471 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3472 { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ 3473 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | 3474 NVME_QUIRK_BOGUS_NID, }, 3475 { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */ 3476 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3477 { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */ 3478 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3479 { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ 3480 .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | 3481 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3482 { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */ 3483 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3484 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ 3485 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | 3486 NVME_QUIRK_BOGUS_NID, }, 3487 { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ 3488 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3489 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3490 { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ 3491 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, 3492 { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ 3493 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3494 { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ 3495 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3496 { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ 3497 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3498 { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ 3499 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3500 { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */ 3501 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3502 { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */ 3503 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3504 { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */ 3505 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3506 { PCI_DEVICE(0x1cc4, 
0x6302), /* UMIS RPJTJ256MGE1QDY 256G */ 3507 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3508 { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ 3509 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3510 { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ 3511 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3512 { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ 3513 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3514 { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ 3515 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3516 { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */ 3517 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3518 { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */ 3519 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3520 { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ 3521 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3522 { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ 3523 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3524 { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ 3525 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3526 { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ 3527 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3528 { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ 3529 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3530 { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ 3531 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3532 { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */ 3533 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3534 { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ 3535 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3536 { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ 3537 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3538 { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ 3539 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3540 { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */ 3541 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3542 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), 3543 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3544 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), 3545 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3546 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061), 3547 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3548 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00), 3549 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3550 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01), 3551 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3552 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), 3553 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3554 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), 3555 .driver_data = NVME_QUIRK_SINGLE_VECTOR }, 3556 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 3557 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), 3558 .driver_data = NVME_QUIRK_SINGLE_VECTOR | 3559 NVME_QUIRK_128_BYTES_SQES | 3560 NVME_QUIRK_SHARED_TAGS | 3561 NVME_QUIRK_SKIP_CID_GEN }, 3562 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 3563 { 0, } 3564 }; 3565 MODULE_DEVICE_TABLE(pci, nvme_id_table); 3566 3567 static struct pci_driver nvme_driver = { 3568 .name = "nvme", 3569 .id_table = nvme_id_table, 3570 .probe = nvme_probe, 3571 .remove = nvme_remove, 3572 .shutdown = nvme_shutdown, 3573 #ifdef CONFIG_PM_SLEEP 3574 .driver = { 3575 .pm = &nvme_dev_pm_ops, 3576 }, 3577 #endif 3578 .sriov_configure = pci_sriov_configure_simple, 3579 .err_handler = &nvme_err_handler, 3580 }; 3581 3582 static int __init 
nvme_init(void) 3583 { 3584 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); 3585 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); 3586 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); 3587 BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); 3588 BUILD_BUG_ON(DIV_ROUND_UP(nvme_pci_npages_prp(), NVME_CTRL_PAGE_SIZE) > 3589 S8_MAX); 3590 3591 return pci_register_driver(&nvme_driver); 3592 } 3593 3594 static void __exit nvme_exit(void) 3595 { 3596 pci_unregister_driver(&nvme_driver); 3597 flush_workqueue(nvme_wq); 3598 } 3599 3600 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 3601 MODULE_LICENSE("GPL"); 3602 MODULE_VERSION("1.0"); 3603 module_init(nvme_init); 3604 module_exit(nvme_exit); 3605
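
/*
 * Example: the write_queues and poll_queues module parameters sampled in
 * nvme_setup_io_queues() only take effect on the next controller reset,
 * so they can either be set at load time, e.g.
 *
 *	modprobe nvme poll_queues=4 write_queues=2
 *
 * or changed later through /sys/module/nvme/parameters/ and picked up by
 * a subsequent controller reset.
 */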