/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	4096
#define NVME_MAX_SEGS	127

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
		"Use SGLs when average request segment size is larger or equal to "
		"this size. Use 0 to disable SGLs.");

static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_int,
};

static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");

static int queue_count_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops queue_count_ops = {
	.set = queue_count_set,
	.get = param_get_int,
};

static int write_queues;
module_param_cb(write_queues, &queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static int poll_queues = 0;
module_param_cb(poll_queues, &queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	int q_depth;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;

	return param_set_int(val, kp);
}

static int queue_count_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret)
		return ret;
	if (n > num_possible_cpus())
		n = num_possible_cpus();

	return param_set_int(val, kp);
}

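/*
 * The shadow doorbell and event-index arrays follow the layout of the
 * controller's doorbell registers: for queue qid, the submission-queue slot
 * sits at dword index qid * 2 * stride and the completion-queue slot
 * immediately after it at (qid * 2 + 1) * stride.  For example, with a
 * doorbell stride of 1, queue 3 uses indices 6 and 7.
 */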
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	struct nvme_command *sq_cmds;
	/* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 last_cq_head;
	u16 qid;
	u8 cq_phase;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
	struct completion delete_done;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
	struct scatterlist *sg;
	struct scatterlist inline_sg[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}

static unsigned int max_io_queues(void)
{
	return num_possible_cpus() + write_queues + poll_queues;
}

static unsigned int max_queue_count(void)
{
	/* IO queues + admin queue */
	return 1 + max_io_queues();
}

static inline unsigned int nvme_dbbuf_size(u32 stride)
{
	return (max_queue_count() * 8 * stride);
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

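/*
 * An MMIO doorbell write is still needed when the controller's advertised
 * event index has been passed since our last write.  The comparison below is
 * done in wrap-safe 16-bit arithmetic (the same idiom as virtio's
 * vring_need_event()): it is true when event_idx lies in the window
 * [old, new_idx), i.e. somewhere in the range this update just covered.
 */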
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(unsigned int num_seg)
{
	return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}

static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg, bool use_sgl)
{
	size_t alloc_size;

	if (use_sgl)
		alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
	else
		alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);

	return alloc_size + sizeof(struct scatterlist) * nseg;
}

static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
{
	unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
				    NVME_INT_BYTES(dev), NVME_INT_PAGES,
				    use_sgl);

	return sizeof(struct nvme_iod) + alloc_size;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;

	nvme_req(req)->ctrl = &dev->ctrl;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

	return 0;
}

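/*
 * Submission doorbell writes are batched: nvme_write_sq_db() below only rings
 * the hardware doorbell when explicitly asked to (end of a blk-mq batch) or
 * when queueing one more command would catch up with the last tail value
 * actually written, which last_sq_tail tracks.  nvme_commit_rqs() is the
 * ->commit_rqs hook that flushes anything still pending when a batch was
 * queued without bd->last set.
 */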
/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 * @write_sq: whether to write to the SQ doorbell
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			    bool write_sq)
{
	spin_lock(&nvmeq->sq_lock);
	memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	if (nseg == 0)
		return false;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
	int nseg = blk_rq_nr_phys_segments(rq);
	unsigned int size = blk_rq_payload_bytes(rq);

	iod->use_sgl = nvme_pci_use_sgls(dev, rq);

	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
		iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
		if (!iod->sg)
			return BLK_STS_RESOURCE;
	} else {
		iod->sg = iod->inline_sg;
	}

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;
	iod->length = size;

	return BLK_STS_OK;
}

static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;

	int i;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			dma_addr);

	for (i = 0; i < iod->npages; i++) {
		void *addr = nvme_pci_iod_list(req)[i];

		if (iod->use_sgl) {
			struct nvme_sgl_desc *sg_list = addr;

			next_dma_addr =
			    le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
		} else {
			__le64 *prp_list = addr;

			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
		}

		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
		dma_addr = next_dma_addr;
	}

	if (iod->sg != iod->inline_sg)
		mempool_free(iod->sg, dev->iod_mempool);
}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

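/*
 * PRP (Physical Region Page) mapping: dptr.prp1 points at the first, possibly
 * unaligned, chunk of the transfer, while dptr.prp2 holds either the second
 * page or, for longer transfers, the DMA address of a PRP list whose 8-byte
 * entries each map one controller page.  When the list itself spans several
 * pages, the last slot of each page is used to chain to the next one (see the
 * chaining below).  For example, a page-aligned 16k request with a 4k
 * controller page size ends up as prp1 plus a three-entry PRP list.
 */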
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	void **list = nvme_pci_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return BLK_STS_RESOURCE;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);

	return BLK_STS_OK;

 bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
			"Invalid SGL for payload:%d nents:%d\n",
			blk_rq_payload_bytes(req), iod->nents);
	return BLK_STS_IOERR;
}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

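/*
 * Each SGL descriptor is 16 bytes and carries its type in the upper nibble of
 * the identifier byte, hence the << 4 in the helpers above.  Data descriptors
 * map one contiguous DMA range; (last) segment descriptors point at another
 * page of descriptors, which is how nvme_pci_setup_sgls() chains additional
 * descriptor pages when a request needs more than SGES_PER_PAGE entries.
 */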
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd, int entries)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				return BLK_STS_RESOURCE;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}

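/*
 * nvme_map_data() DMA-maps the request's scatterlist (through the P2PDMA
 * helpers when the pages are peer-to-peer BAR memory) and then describes it to
 * the controller with either PRPs or SGLs, as decided by nvme_pci_use_sgls()
 * when the iod was initialised.  Integrity metadata, when present, must map to
 * a single contiguous scatterlist element.
 */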
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct request_queue *q = req->q;
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
	blk_status_t ret = BLK_STS_IOERR;
	int nr_mapped;

	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(q, req, iod->sg);
	if (!iod->nents)
		goto out;

	ret = BLK_STS_RESOURCE;

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
					  dma_dir);
	else
		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
					     dma_dir, DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out;

	if (iod->use_sgl)
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);

	if (ret != BLK_STS_OK)
		goto out_unmap;

	ret = BLK_STS_IOERR;
	if (blk_integrity_rq(req)) {
		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
			goto out_unmap;

		sg_init_table(&iod->meta_sg, 1);
		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
			goto out_unmap;

		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
			goto out_unmap;

		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
	}

	return BLK_STS_OK;

out_unmap:
	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
	return ret;
}

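/*
 * pci_p2pdma_map_sg() hands back PCI bus addresses directly rather than going
 * through dma_map_sg(), so there is no mapping state to release for such
 * pages; nvme_unmap_data() therefore only calls dma_unmap_sg() for ordinary
 * host memory.
 */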
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (iod->nents) {
		/* P2PDMA requests do not need to be unmapped */
		if (!is_pci_p2pdma_page(sg_page(iod->sg)))
			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);

		if (blk_integrity_rq(req))
			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
	}

	nvme_cleanup_cmd(req);
	nvme_free_iod(dev, req);
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_command cmnd;
	blk_status_t ret;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	ret = nvme_setup_cmd(ns, req, &cmnd);
	if (ret)
		return ret;

	ret = nvme_init_iod(req, dev);
	if (ret)
		goto out_free_cmd;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &cmnd);
		if (ret)
			goto out_cleanup_iod;
	}

	blk_mq_start_request(req);
	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
	return BLK_STS_OK;
out_cleanup_iod:
	nvme_free_iod(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static void nvme_pci_complete_rq(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_unmap_data(iod->nvmeq->dev, req);
	nvme_complete_rq(req);
}

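/*
 * Completion queue entries carry a phase tag that the controller inverts on
 * every pass through the queue.  An entry is new when its phase matches
 * nvmeq->cq_phase, so pending work can be detected cheaply, and without a
 * lock, before anything is claimed.
 */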
/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
			nvmeq->cq_phase;
}

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
	struct request *req;

	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			cqe->command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvmeq->qid == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	nvme_end_request(req, cqe->status, cqe->result);
}

static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
{
	while (start != end) {
		nvme_handle_cqe(nvmeq, start);
		if (++start == nvmeq->q_depth)
			start = 0;
	}
}

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	if (++nvmeq->cq_head == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase = !nvmeq->cq_phase;
	}
}

static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
				  u16 *end, unsigned int tag)
{
	int found = 0;

	*start = nvmeq->cq_head;
	while (nvme_cqe_pending(nvmeq)) {
		if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
			found++;
		nvme_update_cq_head(nvmeq);
	}
	*end = nvmeq->cq_head;

	if (*start != *end)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	irqreturn_t ret = IRQ_NONE;
	u16 start, end;

	/*
	 * The rmb/wmb pair ensures we see all updates from a previous run of
	 * the irq handler, even if that was on another CPU.
	 */
	rmb();
	if (nvmeq->cq_head != nvmeq->last_cq_head)
		ret = IRQ_HANDLED;
	nvme_process_cq(nvmeq, &start, &end, -1);
	nvmeq->last_cq_head = nvmeq->cq_head;
	wmb();

	if (start != end) {
		nvme_complete_cqes(nvmeq, start, end);
		return IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	if (nvme_cqe_pending(nvmeq))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

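/*
 * Two polling entry points follow.  nvme_poll_irqdisable() may be used on any
 * queue: it takes cq_poll_lock for interrupt-less poll queues and masks the
 * queue's interrupt otherwise, so it cannot race the IRQ handler.
 * nvme_poll() is the blk-mq ->poll callback and only ever runs on dedicated
 * poll queues, serialised purely by cq_poll_lock.
 */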
/*
 * Poll for completions on any queue, including those not dedicated to polling.
 * Can be called from any context.
 */
static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	u16 start, end;
	int found;

	/*
	 * For a poll queue we need to protect against the polling thread
	 * using the CQ lock.  For normal interrupt driven threads we have
	 * to disable the interrupt to avoid racing with it.
	 */
	if (nvmeq->cq_vector == -1) {
		spin_lock(&nvmeq->cq_poll_lock);
		found = nvme_process_cq(nvmeq, &start, &end, tag);
		spin_unlock(&nvmeq->cq_poll_lock);
	} else {
		disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
		found = nvme_process_cq(nvmeq, &start, &end, tag);
		enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	}

	nvme_complete_cqes(nvmeq, start, end);
	return found;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	u16 start, end;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_process_cq(nvmeq, &start, &end, -1);
	spin_unlock(&nvmeq->cq_poll_lock);

	nvme_complete_cqes(nvmeq, start, end);
	return found;
}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	nvme_submit_cmd(nvmeq, &c, true);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

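/*
 * adapter_alloc_cq() and adapter_alloc_sq() wrap the Create I/O Completion
 * Queue and Create I/O Submission Queue admin commands.  A completion queue
 * that backs a polled queue is created with vector == -1 and therefore
 * without NVME_CQ_IRQ_ENABLED, so the controller never raises an interrupt
 * for it.
 */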
116057dacad5SJay Sternberg */ 116157dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 116257dacad5SJay Sternberg c.create_cq.opcode = nvme_admin_create_cq; 116357dacad5SJay Sternberg c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 116457dacad5SJay Sternberg c.create_cq.cqid = cpu_to_le16(qid); 116557dacad5SJay Sternberg c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 116657dacad5SJay Sternberg c.create_cq.cq_flags = cpu_to_le16(flags); 11674b04cc6aSJens Axboe if (vector != -1) 1168a8e3e0bbSJianchao Wang c.create_cq.irq_vector = cpu_to_le16(vector); 11694b04cc6aSJens Axboe else 11704b04cc6aSJens Axboe c.create_cq.irq_vector = 0; 117157dacad5SJay Sternberg 11721c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 117357dacad5SJay Sternberg } 117457dacad5SJay Sternberg 117557dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 117657dacad5SJay Sternberg struct nvme_queue *nvmeq) 117757dacad5SJay Sternberg { 11789abd68efSJens Axboe struct nvme_ctrl *ctrl = &dev->ctrl; 117957dacad5SJay Sternberg struct nvme_command c; 118081c1cd98SKeith Busch int flags = NVME_QUEUE_PHYS_CONTIG; 118157dacad5SJay Sternberg 118257dacad5SJay Sternberg /* 11839abd68efSJens Axboe * Some drives have a bug that auto-enables WRRU if MEDIUM isn't 11849abd68efSJens Axboe * set. Since URGENT priority is zeroes, it makes all queues 11859abd68efSJens Axboe * URGENT. 11869abd68efSJens Axboe */ 11879abd68efSJens Axboe if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) 11889abd68efSJens Axboe flags |= NVME_SQ_PRIO_MEDIUM; 11899abd68efSJens Axboe 11909abd68efSJens Axboe /* 119116772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 119257dacad5SJay Sternberg * is attached to the request. 119357dacad5SJay Sternberg */ 119457dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 119557dacad5SJay Sternberg c.create_sq.opcode = nvme_admin_create_sq; 119657dacad5SJay Sternberg c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 119757dacad5SJay Sternberg c.create_sq.sqid = cpu_to_le16(qid); 119857dacad5SJay Sternberg c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 119957dacad5SJay Sternberg c.create_sq.sq_flags = cpu_to_le16(flags); 120057dacad5SJay Sternberg c.create_sq.cqid = cpu_to_le16(qid); 120157dacad5SJay Sternberg 12021c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 120357dacad5SJay Sternberg } 120457dacad5SJay Sternberg 120557dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 120657dacad5SJay Sternberg { 120757dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 120857dacad5SJay Sternberg } 120957dacad5SJay Sternberg 121057dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 121157dacad5SJay Sternberg { 121257dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 121357dacad5SJay Sternberg } 121457dacad5SJay Sternberg 12152a842acaSChristoph Hellwig static void abort_endio(struct request *req, blk_status_t error) 121657dacad5SJay Sternberg { 1217f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1218f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 121957dacad5SJay Sternberg 122027fa9bc5SChristoph Hellwig dev_warn(nvmeq->dev->ctrl.device, 122127fa9bc5SChristoph Hellwig "Abort status: 0x%x", nvme_req(req)->status); 1222e7a2a87dSChristoph Hellwig atomic_inc(&nvmeq->dev->ctrl.abort_limit); 1223e7a2a87dSChristoph Hellwig blk_mq_free_request(req); 
122457dacad5SJay Sternberg } 122557dacad5SJay Sternberg 1226b2a0eb1aSKeith Busch static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) 1227b2a0eb1aSKeith Busch { 1228b2a0eb1aSKeith Busch 1229b2a0eb1aSKeith Busch /* If true, indicates loss of adapter communication, possibly by a 1230b2a0eb1aSKeith Busch * NVMe Subsystem reset. 1231b2a0eb1aSKeith Busch */ 1232b2a0eb1aSKeith Busch bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1233b2a0eb1aSKeith Busch 1234ad70062cSJianchao Wang /* If there is a reset/reinit ongoing, we shouldn't reset again. */ 1235ad70062cSJianchao Wang switch (dev->ctrl.state) { 1236ad70062cSJianchao Wang case NVME_CTRL_RESETTING: 1237ad6a0a52SMax Gurtovoy case NVME_CTRL_CONNECTING: 1238b2a0eb1aSKeith Busch return false; 1239ad70062cSJianchao Wang default: 1240ad70062cSJianchao Wang break; 1241ad70062cSJianchao Wang } 1242b2a0eb1aSKeith Busch 1243b2a0eb1aSKeith Busch /* We shouldn't reset unless the controller is on fatal error state 1244b2a0eb1aSKeith Busch * _or_ if we lost the communication with it. 1245b2a0eb1aSKeith Busch */ 1246b2a0eb1aSKeith Busch if (!(csts & NVME_CSTS_CFS) && !nssro) 1247b2a0eb1aSKeith Busch return false; 1248b2a0eb1aSKeith Busch 1249b2a0eb1aSKeith Busch return true; 1250b2a0eb1aSKeith Busch } 1251b2a0eb1aSKeith Busch 1252b2a0eb1aSKeith Busch static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) 1253b2a0eb1aSKeith Busch { 1254b2a0eb1aSKeith Busch /* Read a config register to help see what died. */ 1255b2a0eb1aSKeith Busch u16 pci_status; 1256b2a0eb1aSKeith Busch int result; 1257b2a0eb1aSKeith Busch 1258b2a0eb1aSKeith Busch result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1259b2a0eb1aSKeith Busch &pci_status); 1260b2a0eb1aSKeith Busch if (result == PCIBIOS_SUCCESSFUL) 1261b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1262b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1263b2a0eb1aSKeith Busch csts, pci_status); 1264b2a0eb1aSKeith Busch else 1265b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1266b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1267b2a0eb1aSKeith Busch csts, result); 1268b2a0eb1aSKeith Busch } 1269b2a0eb1aSKeith Busch 127031c7c7d2SChristoph Hellwig static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) 127157dacad5SJay Sternberg { 1272f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1273f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 127457dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 127557dacad5SJay Sternberg struct request *abort_req; 127657dacad5SJay Sternberg struct nvme_command cmd; 1277b2a0eb1aSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 1278b2a0eb1aSKeith Busch 1279651438bbSWen Xiong /* If PCI error recovery process is happening, we cannot reset or 1280651438bbSWen Xiong * the recovery mechanism will surely fail. 
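 * The barrier below is intended to make sure the pci_channel_offline()
 * check sees the channel state most recently recorded by the PCI
 * error-recovery code.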
1281651438bbSWen Xiong */ 1282651438bbSWen Xiong mb(); 1283651438bbSWen Xiong if (pci_channel_offline(to_pci_dev(dev->dev))) 1284651438bbSWen Xiong return BLK_EH_RESET_TIMER; 1285651438bbSWen Xiong 1286b2a0eb1aSKeith Busch /* 1287b2a0eb1aSKeith Busch * Reset immediately if the controller is failed 1288b2a0eb1aSKeith Busch */ 1289b2a0eb1aSKeith Busch if (nvme_should_reset(dev, csts)) { 1290b2a0eb1aSKeith Busch nvme_warn_reset(dev, csts); 1291b2a0eb1aSKeith Busch nvme_dev_disable(dev, false); 1292d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1293db8c48e4SChristoph Hellwig return BLK_EH_DONE; 1294b2a0eb1aSKeith Busch } 129557dacad5SJay Sternberg 129631c7c7d2SChristoph Hellwig /* 12977776db1cSKeith Busch * Did we miss an interrupt? 12987776db1cSKeith Busch */ 12990b2a8a9fSChristoph Hellwig if (nvme_poll_irqdisable(nvmeq, req->tag)) { 13007776db1cSKeith Busch dev_warn(dev->ctrl.device, 13017776db1cSKeith Busch "I/O %d QID %d timeout, completion polled\n", 13027776db1cSKeith Busch req->tag, nvmeq->qid); 1303db8c48e4SChristoph Hellwig return BLK_EH_DONE; 13047776db1cSKeith Busch } 13057776db1cSKeith Busch 13067776db1cSKeith Busch /* 1307fd634f41SChristoph Hellwig * Shutdown immediately if controller times out while starting. The 1308fd634f41SChristoph Hellwig * reset work will see the pci device disabled when it gets the forced 1309fd634f41SChristoph Hellwig * cancellation error. All outstanding requests are completed on 1310db8c48e4SChristoph Hellwig * shutdown, so we return BLK_EH_DONE. 1311fd634f41SChristoph Hellwig */ 13124244140dSKeith Busch switch (dev->ctrl.state) { 13134244140dSKeith Busch case NVME_CTRL_CONNECTING: 13144244140dSKeith Busch case NVME_CTRL_RESETTING: 1315b9cac43cSKeith Busch dev_warn_ratelimited(dev->ctrl.device, 1316fd634f41SChristoph Hellwig "I/O %d QID %d timeout, disable controller\n", 1317fd634f41SChristoph Hellwig req->tag, nvmeq->qid); 1318a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 131927fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1320db8c48e4SChristoph Hellwig return BLK_EH_DONE; 13214244140dSKeith Busch default: 13224244140dSKeith Busch break; 1323fd634f41SChristoph Hellwig } 1324fd634f41SChristoph Hellwig 1325fd634f41SChristoph Hellwig /* 1326e1569a16SKeith Busch * Shutdown the controller immediately and schedule a reset if the 1327e1569a16SKeith Busch * command was already aborted once before and still hasn't been 1328e1569a16SKeith Busch * returned to the driver, or if this is the admin queue. 
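 * Admin commands are never aborted here: an Abort would itself have to be
 * issued on the (stuck) admin queue, so a controller reset is the only
 * sensible recovery.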
132931c7c7d2SChristoph Hellwig */ 1330f4800d6dSChristoph Hellwig if (!nvmeq->qid || iod->aborted) { 13311b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 133257dacad5SJay Sternberg "I/O %d QID %d timeout, reset controller\n", 133357dacad5SJay Sternberg req->tag, nvmeq->qid); 1334a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1335d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1336e1569a16SKeith Busch 133727fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1338db8c48e4SChristoph Hellwig return BLK_EH_DONE; 133957dacad5SJay Sternberg } 134057dacad5SJay Sternberg 1341e7a2a87dSChristoph Hellwig if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1342e7a2a87dSChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 1343e7a2a87dSChristoph Hellwig return BLK_EH_RESET_TIMER; 1344e7a2a87dSChristoph Hellwig } 13457bf7d778SKeith Busch iod->aborted = 1; 134657dacad5SJay Sternberg 134757dacad5SJay Sternberg memset(&cmd, 0, sizeof(cmd)); 134857dacad5SJay Sternberg cmd.abort.opcode = nvme_admin_abort_cmd; 134957dacad5SJay Sternberg cmd.abort.cid = req->tag; 135057dacad5SJay Sternberg cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 135157dacad5SJay Sternberg 13521b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 13531b3c47c1SSagi Grimberg "I/O %d QID %d timeout, aborting\n", 135457dacad5SJay Sternberg req->tag, nvmeq->qid); 1355e7a2a87dSChristoph Hellwig 1356e7a2a87dSChristoph Hellwig abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd, 1357eb71f435SChristoph Hellwig BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 13586bf25d16SChristoph Hellwig if (IS_ERR(abort_req)) { 13596bf25d16SChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 136031c7c7d2SChristoph Hellwig return BLK_EH_RESET_TIMER; 136157dacad5SJay Sternberg } 136257dacad5SJay Sternberg 1363e7a2a87dSChristoph Hellwig abort_req->timeout = ADMIN_TIMEOUT; 1364e7a2a87dSChristoph Hellwig abort_req->end_io_data = NULL; 1365e7a2a87dSChristoph Hellwig blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio); 136657dacad5SJay Sternberg 136757dacad5SJay Sternberg /* 136857dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 136957dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 137057dacad5SJay Sternberg * as the device then is in a faulty state. 
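 * (On the second timeout iod->aborted is already set, so the branch above
 * escalates straight to nvme_dev_disable() and nvme_reset_ctrl().)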
137157dacad5SJay Sternberg */ 137257dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 137357dacad5SJay Sternberg } 137457dacad5SJay Sternberg 137557dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 137657dacad5SJay Sternberg { 137757dacad5SJay Sternberg dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 137857dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 137963223078SChristoph Hellwig if (!nvmeq->sq_cmds) 138063223078SChristoph Hellwig return; 13810f238ff5SLogan Gunthorpe 138263223078SChristoph Hellwig if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { 13830f238ff5SLogan Gunthorpe pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev), 138463223078SChristoph Hellwig nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth)); 138563223078SChristoph Hellwig } else { 138663223078SChristoph Hellwig dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 138763223078SChristoph Hellwig nvmeq->sq_cmds, nvmeq->sq_dma_addr); 13880f238ff5SLogan Gunthorpe } 138957dacad5SJay Sternberg } 139057dacad5SJay Sternberg 139157dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 139257dacad5SJay Sternberg { 139357dacad5SJay Sternberg int i; 139457dacad5SJay Sternberg 1395d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 1396d858e5f0SSagi Grimberg dev->ctrl.queue_count--; 1397147b27e4SSagi Grimberg nvme_free_queue(&dev->queues[i]); 139857dacad5SJay Sternberg } 139957dacad5SJay Sternberg } 140057dacad5SJay Sternberg 140157dacad5SJay Sternberg /** 140257dacad5SJay Sternberg * nvme_suspend_queue - put queue into suspended state 140340581d1aSBart Van Assche * @nvmeq: queue to suspend 140457dacad5SJay Sternberg */ 140557dacad5SJay Sternberg static int nvme_suspend_queue(struct nvme_queue *nvmeq) 140657dacad5SJay Sternberg { 14074e224106SChristoph Hellwig if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) 140857dacad5SJay Sternberg return 1; 140957dacad5SJay Sternberg 14104e224106SChristoph Hellwig /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ 1411d1f06f4aSJens Axboe mb(); 141257dacad5SJay Sternberg 14134e224106SChristoph Hellwig nvmeq->dev->online_queues--; 14141c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 1415c81545f9SSagi Grimberg blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); 14164e224106SChristoph Hellwig if (nvmeq->cq_vector == -1) 14174e224106SChristoph Hellwig return 0; 14184e224106SChristoph Hellwig pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); 14194e224106SChristoph Hellwig nvmeq->cq_vector = -1; 142057dacad5SJay Sternberg return 0; 142157dacad5SJay Sternberg } 142257dacad5SJay Sternberg 1423a5cdb68cSKeith Busch static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 142457dacad5SJay Sternberg { 1425147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 142657dacad5SJay Sternberg 1427a5cdb68cSKeith Busch if (shutdown) 1428a5cdb68cSKeith Busch nvme_shutdown_ctrl(&dev->ctrl); 1429a5cdb68cSKeith Busch else 143020d0dfe6SSagi Grimberg nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap); 143157dacad5SJay Sternberg 14320b2a8a9fSChristoph Hellwig nvme_poll_irqdisable(nvmeq, -1); 143357dacad5SJay Sternberg } 143457dacad5SJay Sternberg 143557dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 143657dacad5SJay Sternberg int entry_size) 143757dacad5SJay Sternberg { 143857dacad5SJay Sternberg int q_depth = dev->q_depth; 14395fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 
14405fd4ce1bSChristoph Hellwig dev->ctrl.page_size); 144157dacad5SJay Sternberg 144257dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 144357dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 14445fd4ce1bSChristoph Hellwig mem_per_q = round_down(mem_per_q, dev->ctrl.page_size); 144557dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 144657dacad5SJay Sternberg 144757dacad5SJay Sternberg /* 144857dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 144957dacad5SJay Sternberg * would be better to map queues in system memory with the 145057dacad5SJay Sternberg * original depth 145157dacad5SJay Sternberg */ 145257dacad5SJay Sternberg if (q_depth < 64) 145357dacad5SJay Sternberg return -ENOMEM; 145457dacad5SJay Sternberg } 145557dacad5SJay Sternberg 145657dacad5SJay Sternberg return q_depth; 145757dacad5SJay Sternberg } 145857dacad5SJay Sternberg 145957dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 146057dacad5SJay Sternberg int qid, int depth) 146157dacad5SJay Sternberg { 14620f238ff5SLogan Gunthorpe struct pci_dev *pdev = to_pci_dev(dev->dev); 1463815c6704SKeith Busch 14640f238ff5SLogan Gunthorpe if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { 14650f238ff5SLogan Gunthorpe nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth)); 14660f238ff5SLogan Gunthorpe nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, 14670f238ff5SLogan Gunthorpe nvmeq->sq_cmds); 146863223078SChristoph Hellwig if (nvmeq->sq_dma_addr) { 146963223078SChristoph Hellwig set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); 147063223078SChristoph Hellwig return 0; 147163223078SChristoph Hellwig } 14720f238ff5SLogan Gunthorpe } 14730f238ff5SLogan Gunthorpe 147457dacad5SJay Sternberg nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), 147557dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 147657dacad5SJay Sternberg if (!nvmeq->sq_cmds) 147757dacad5SJay Sternberg return -ENOMEM; 147857dacad5SJay Sternberg return 0; 147957dacad5SJay Sternberg } 148057dacad5SJay Sternberg 1481a6ff7262SKeith Busch static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) 148257dacad5SJay Sternberg { 1483147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[qid]; 148457dacad5SJay Sternberg 148562314e40SKeith Busch if (dev->ctrl.queue_count > qid) 148662314e40SKeith Busch return 0; 148757dacad5SJay Sternberg 148857dacad5SJay Sternberg nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), 148957dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 149057dacad5SJay Sternberg if (!nvmeq->cqes) 149157dacad5SJay Sternberg goto free_nvmeq; 149257dacad5SJay Sternberg 149357dacad5SJay Sternberg if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth)) 149457dacad5SJay Sternberg goto free_cqdma; 149557dacad5SJay Sternberg 149657dacad5SJay Sternberg nvmeq->q_dmadev = dev->dev; 149757dacad5SJay Sternberg nvmeq->dev = dev; 14981ab0cd69SJens Axboe spin_lock_init(&nvmeq->sq_lock); 14993a7afd8eSChristoph Hellwig spin_lock_init(&nvmeq->cq_poll_lock); 150057dacad5SJay Sternberg nvmeq->cq_head = 0; 150157dacad5SJay Sternberg nvmeq->cq_phase = 1; 150257dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 150357dacad5SJay Sternberg nvmeq->q_depth = depth; 150457dacad5SJay Sternberg nvmeq->qid = qid; 150557dacad5SJay Sternberg nvmeq->cq_vector = -1; 1506d858e5f0SSagi Grimberg dev->ctrl.queue_count++; 150757dacad5SJay Sternberg 1508147b27e4SSagi Grimberg return 0; 
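/*
 * Error unwind: free_cqdma releases the completion ring allocated above.
 * Nothing else needs to be undone, the nvme_queue itself is embedded in
 * dev->queues[] and is not allocated here.
 */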
150957dacad5SJay Sternberg 151057dacad5SJay Sternberg free_cqdma: 151157dacad5SJay Sternberg dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes, 151257dacad5SJay Sternberg nvmeq->cq_dma_addr); 151357dacad5SJay Sternberg free_nvmeq: 1514147b27e4SSagi Grimberg return -ENOMEM; 151557dacad5SJay Sternberg } 151657dacad5SJay Sternberg 1517dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 151857dacad5SJay Sternberg { 15190ff199cbSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 15200ff199cbSChristoph Hellwig int nr = nvmeq->dev->ctrl.instance; 15210ff199cbSChristoph Hellwig 15220ff199cbSChristoph Hellwig if (use_threaded_interrupts) { 15230ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 15240ff199cbSChristoph Hellwig nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15250ff199cbSChristoph Hellwig } else { 15260ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 15270ff199cbSChristoph Hellwig NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15280ff199cbSChristoph Hellwig } 152957dacad5SJay Sternberg } 153057dacad5SJay Sternberg 153157dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 153257dacad5SJay Sternberg { 153357dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 153457dacad5SJay Sternberg 153557dacad5SJay Sternberg nvmeq->sq_tail = 0; 153604f3eafdSJens Axboe nvmeq->last_sq_tail = 0; 153757dacad5SJay Sternberg nvmeq->cq_head = 0; 153857dacad5SJay Sternberg nvmeq->cq_phase = 1; 153957dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 154057dacad5SJay Sternberg memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); 1541f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 154257dacad5SJay Sternberg dev->online_queues++; 15433a7afd8eSChristoph Hellwig wmb(); /* ensure the first interrupt sees the initialization */ 154457dacad5SJay Sternberg } 154557dacad5SJay Sternberg 15464b04cc6aSJens Axboe static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) 154757dacad5SJay Sternberg { 154857dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 154957dacad5SJay Sternberg int result; 1550a8e3e0bbSJianchao Wang s16 vector; 155157dacad5SJay Sternberg 1552d1ed6aa1SChristoph Hellwig clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 1553d1ed6aa1SChristoph Hellwig 155422b55601SKeith Busch /* 155522b55601SKeith Busch * A queue's vector matches the queue identifier unless the controller 155622b55601SKeith Busch * has only one vector available. 155722b55601SKeith Busch */ 15584b04cc6aSJens Axboe if (!polled) 1559a8e3e0bbSJianchao Wang vector = dev->num_vecs == 1 ? 
0 : qid; 15604b04cc6aSJens Axboe else 15614b04cc6aSJens Axboe vector = -1; 15624b04cc6aSJens Axboe 1563a8e3e0bbSJianchao Wang result = adapter_alloc_cq(dev, qid, nvmeq, vector); 1564ded45505SKeith Busch if (result) 1565ded45505SKeith Busch return result; 156657dacad5SJay Sternberg 156757dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 156857dacad5SJay Sternberg if (result < 0) 1569ded45505SKeith Busch return result; 1570ded45505SKeith Busch else if (result) 157157dacad5SJay Sternberg goto release_cq; 157257dacad5SJay Sternberg 1573a8e3e0bbSJianchao Wang nvmeq->cq_vector = vector; 1574161b8be2SKeith Busch nvme_init_queue(nvmeq, qid); 15754b04cc6aSJens Axboe 15764b04cc6aSJens Axboe if (vector != -1) { 1577dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 157857dacad5SJay Sternberg if (result < 0) 157957dacad5SJay Sternberg goto release_sq; 15804b04cc6aSJens Axboe } 158157dacad5SJay Sternberg 15824e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 158357dacad5SJay Sternberg return result; 158457dacad5SJay Sternberg 158557dacad5SJay Sternberg release_sq: 1586a8e3e0bbSJianchao Wang nvmeq->cq_vector = -1; 1587f25a2dfcSJianchao Wang dev->online_queues--; 158857dacad5SJay Sternberg adapter_delete_sq(dev, qid); 158957dacad5SJay Sternberg release_cq: 159057dacad5SJay Sternberg adapter_delete_cq(dev, qid); 159157dacad5SJay Sternberg return result; 159257dacad5SJay Sternberg } 159357dacad5SJay Sternberg 1594f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 159557dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 159677f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 159757dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 159857dacad5SJay Sternberg .exit_hctx = nvme_admin_exit_hctx, 15990350815aSChristoph Hellwig .init_request = nvme_init_request, 160057dacad5SJay Sternberg .timeout = nvme_timeout, 160157dacad5SJay Sternberg }; 160257dacad5SJay Sternberg 1603f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 1604376f7ef8SChristoph Hellwig .queue_rq = nvme_queue_rq, 1605376f7ef8SChristoph Hellwig .complete = nvme_pci_complete_rq, 1606376f7ef8SChristoph Hellwig .commit_rqs = nvme_commit_rqs, 1607376f7ef8SChristoph Hellwig .init_hctx = nvme_init_hctx, 1608376f7ef8SChristoph Hellwig .init_request = nvme_init_request, 1609376f7ef8SChristoph Hellwig .map_queues = nvme_pci_map_queues, 1610376f7ef8SChristoph Hellwig .timeout = nvme_timeout, 1611c6d962aeSChristoph Hellwig .poll = nvme_poll, 1612dabcefabSJens Axboe }; 1613dabcefabSJens Axboe 161457dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 161557dacad5SJay Sternberg { 16161c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 161769d9a99cSKeith Busch /* 161869d9a99cSKeith Busch * If the controller was reset during removal, it's possible 161969d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 162069d9a99cSKeith Busch * queue to flush these to completion. 
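 * Otherwise blk_cleanup_queue() below could wait forever on requests that
 * can never be dispatched from a quiesced queue.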
162169d9a99cSKeith Busch */ 1622c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 16231c63dc66SChristoph Hellwig blk_cleanup_queue(dev->ctrl.admin_q); 162457dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 162557dacad5SJay Sternberg } 162657dacad5SJay Sternberg } 162757dacad5SJay Sternberg 162857dacad5SJay Sternberg static int nvme_alloc_admin_tags(struct nvme_dev *dev) 162957dacad5SJay Sternberg { 16301c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q) { 163157dacad5SJay Sternberg dev->admin_tagset.ops = &nvme_mq_admin_ops; 163257dacad5SJay Sternberg dev->admin_tagset.nr_hw_queues = 1; 1633e3e9d50cSKeith Busch 163438dabe21SKeith Busch dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 163557dacad5SJay Sternberg dev->admin_tagset.timeout = ADMIN_TIMEOUT; 163657dacad5SJay Sternberg dev->admin_tagset.numa_node = dev_to_node(dev->dev); 1637a7a7cbe3SChaitanya Kulkarni dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false); 1638d3484991SJens Axboe dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; 163957dacad5SJay Sternberg dev->admin_tagset.driver_data = dev; 164057dacad5SJay Sternberg 164157dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 164257dacad5SJay Sternberg return -ENOMEM; 164334b6c231SSagi Grimberg dev->ctrl.admin_tagset = &dev->admin_tagset; 164457dacad5SJay Sternberg 16451c63dc66SChristoph Hellwig dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); 16461c63dc66SChristoph Hellwig if (IS_ERR(dev->ctrl.admin_q)) { 164757dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 164857dacad5SJay Sternberg return -ENOMEM; 164957dacad5SJay Sternberg } 16501c63dc66SChristoph Hellwig if (!blk_get_queue(dev->ctrl.admin_q)) { 165157dacad5SJay Sternberg nvme_dev_remove_admin(dev); 16521c63dc66SChristoph Hellwig dev->ctrl.admin_q = NULL; 165357dacad5SJay Sternberg return -ENODEV; 165457dacad5SJay Sternberg } 165557dacad5SJay Sternberg } else 1656c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 165757dacad5SJay Sternberg 165857dacad5SJay Sternberg return 0; 165957dacad5SJay Sternberg } 166057dacad5SJay Sternberg 166197f6ef64SXu Yu static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 166297f6ef64SXu Yu { 166397f6ef64SXu Yu return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 166497f6ef64SXu Yu } 166597f6ef64SXu Yu 166697f6ef64SXu Yu static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 166797f6ef64SXu Yu { 166897f6ef64SXu Yu struct pci_dev *pdev = to_pci_dev(dev->dev); 166997f6ef64SXu Yu 167097f6ef64SXu Yu if (size <= dev->bar_mapped_size) 167197f6ef64SXu Yu return 0; 167297f6ef64SXu Yu if (size > pci_resource_len(pdev, 0)) 167397f6ef64SXu Yu return -ENOMEM; 167497f6ef64SXu Yu if (dev->bar) 167597f6ef64SXu Yu iounmap(dev->bar); 167697f6ef64SXu Yu dev->bar = ioremap(pci_resource_start(pdev, 0), size); 167797f6ef64SXu Yu if (!dev->bar) { 167897f6ef64SXu Yu dev->bar_mapped_size = 0; 167997f6ef64SXu Yu return -ENOMEM; 168097f6ef64SXu Yu } 168197f6ef64SXu Yu dev->bar_mapped_size = size; 168297f6ef64SXu Yu dev->dbs = dev->bar + NVME_REG_DBS; 168397f6ef64SXu Yu 168497f6ef64SXu Yu return 0; 168597f6ef64SXu Yu } 168697f6ef64SXu Yu 168701ad0990SSagi Grimberg static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 168857dacad5SJay Sternberg { 168957dacad5SJay Sternberg int result; 169057dacad5SJay Sternberg u32 aqa; 169157dacad5SJay Sternberg struct nvme_queue *nvmeq; 169257dacad5SJay Sternberg 169397f6ef64SXu Yu result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 
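/*
 * db_bar_size(dev, 0) maps just enough of BAR0 for the admin queue: the
 * controller registers up to NVME_REG_DBS plus a single SQ tail/CQ head
 * doorbell pair of 4 * db_stride bytes each.
 */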
169497f6ef64SXu Yu if (result < 0) 169597f6ef64SXu Yu return result; 169697f6ef64SXu Yu 16978ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 169820d0dfe6SSagi Grimberg NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 169957dacad5SJay Sternberg 17007a67cbeaSChristoph Hellwig if (dev->subsystem && 17017a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 17027a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 170357dacad5SJay Sternberg 170420d0dfe6SSagi Grimberg result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap); 170557dacad5SJay Sternberg if (result < 0) 170657dacad5SJay Sternberg return result; 170757dacad5SJay Sternberg 1708a6ff7262SKeith Busch result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1709147b27e4SSagi Grimberg if (result) 1710147b27e4SSagi Grimberg return result; 171157dacad5SJay Sternberg 1712147b27e4SSagi Grimberg nvmeq = &dev->queues[0]; 171357dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 171457dacad5SJay Sternberg aqa |= aqa << 16; 171557dacad5SJay Sternberg 17167a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 17177a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 17187a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 171957dacad5SJay Sternberg 172020d0dfe6SSagi Grimberg result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap); 172157dacad5SJay Sternberg if (result) 1722d4875622SKeith Busch return result; 172357dacad5SJay Sternberg 172457dacad5SJay Sternberg nvmeq->cq_vector = 0; 1725161b8be2SKeith Busch nvme_init_queue(nvmeq, 0); 1726dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 172757dacad5SJay Sternberg if (result) { 172857dacad5SJay Sternberg nvmeq->cq_vector = -1; 1729d4875622SKeith Busch return result; 173057dacad5SJay Sternberg } 173157dacad5SJay Sternberg 17324e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 173357dacad5SJay Sternberg return result; 173457dacad5SJay Sternberg } 173557dacad5SJay Sternberg 1736749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 173757dacad5SJay Sternberg { 17384b04cc6aSJens Axboe unsigned i, max, rw_queues; 1739749941f2SChristoph Hellwig int ret = 0; 174057dacad5SJay Sternberg 1741d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 1742a6ff7262SKeith Busch if (nvme_alloc_queue(dev, i, dev->q_depth)) { 1743749941f2SChristoph Hellwig ret = -ENOMEM; 174457dacad5SJay Sternberg break; 1745749941f2SChristoph Hellwig } 1746749941f2SChristoph Hellwig } 174757dacad5SJay Sternberg 1748d858e5f0SSagi Grimberg max = min(dev->max_qid, dev->ctrl.queue_count - 1); 1749e20ba6e1SChristoph Hellwig if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { 1750e20ba6e1SChristoph Hellwig rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + 1751e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_READ]; 17524b04cc6aSJens Axboe } else { 17534b04cc6aSJens Axboe rw_queues = max; 17544b04cc6aSJens Axboe } 17554b04cc6aSJens Axboe 1756949928c1SKeith Busch for (i = dev->online_queues; i <= max; i++) { 17574b04cc6aSJens Axboe bool polled = i > rw_queues; 17584b04cc6aSJens Axboe 17594b04cc6aSJens Axboe ret = nvme_create_queue(&dev->queues[i], i, polled); 1760d4875622SKeith Busch if (ret) 176157dacad5SJay Sternberg break; 176257dacad5SJay Sternberg } 176357dacad5SJay Sternberg 1764749941f2SChristoph Hellwig /* 1765749941f2SChristoph Hellwig * Ignore failing Create SQ/CQ commands, we can continue with less 
17668adb8c14SMinwoo Im * than the desired amount of queues, and even a controller without 17678adb8c14SMinwoo Im * I/O queues can still be used to issue admin commands. This might 1768749941f2SChristoph Hellwig * be useful to upgrade a buggy firmware for example. 1769749941f2SChristoph Hellwig */ 1770749941f2SChristoph Hellwig return ret >= 0 ? 0 : ret; 177157dacad5SJay Sternberg } 177257dacad5SJay Sternberg 1773202021c1SStephen Bates static ssize_t nvme_cmb_show(struct device *dev, 1774202021c1SStephen Bates struct device_attribute *attr, 1775202021c1SStephen Bates char *buf) 1776202021c1SStephen Bates { 1777202021c1SStephen Bates struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 1778202021c1SStephen Bates 1779c965809cSStephen Bates return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", 1780202021c1SStephen Bates ndev->cmbloc, ndev->cmbsz); 1781202021c1SStephen Bates } 1782202021c1SStephen Bates static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); 1783202021c1SStephen Bates 178488de4598SChristoph Hellwig static u64 nvme_cmb_size_unit(struct nvme_dev *dev) 178557dacad5SJay Sternberg { 178688de4598SChristoph Hellwig u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; 178788de4598SChristoph Hellwig 178888de4598SChristoph Hellwig return 1ULL << (12 + 4 * szu); 178988de4598SChristoph Hellwig } 179088de4598SChristoph Hellwig 179188de4598SChristoph Hellwig static u32 nvme_cmb_size(struct nvme_dev *dev) 179288de4598SChristoph Hellwig { 179388de4598SChristoph Hellwig return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; 179488de4598SChristoph Hellwig } 179588de4598SChristoph Hellwig 1796f65efd6dSChristoph Hellwig static void nvme_map_cmb(struct nvme_dev *dev) 179757dacad5SJay Sternberg { 179888de4598SChristoph Hellwig u64 size, offset; 179957dacad5SJay Sternberg resource_size_t bar_size; 180057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 18018969f1f8SChristoph Hellwig int bar; 180257dacad5SJay Sternberg 18039fe5c59fSKeith Busch if (dev->cmb_size) 18049fe5c59fSKeith Busch return; 18059fe5c59fSKeith Busch 18067a67cbeaSChristoph Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1807f65efd6dSChristoph Hellwig if (!dev->cmbsz) 1808f65efd6dSChristoph Hellwig return; 1809202021c1SStephen Bates dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 181057dacad5SJay Sternberg 181188de4598SChristoph Hellwig size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); 181288de4598SChristoph Hellwig offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); 18138969f1f8SChristoph Hellwig bar = NVME_CMB_BIR(dev->cmbloc); 18148969f1f8SChristoph Hellwig bar_size = pci_resource_len(pdev, bar); 181557dacad5SJay Sternberg 181657dacad5SJay Sternberg if (offset > bar_size) 1817f65efd6dSChristoph Hellwig return; 181857dacad5SJay Sternberg 181957dacad5SJay Sternberg /* 182057dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 182157dacad5SJay Sternberg * for example, due to being behind a bridge. 
Reduce the CMB to 182257dacad5SJay Sternberg * the reported size of the BAR 182357dacad5SJay Sternberg */ 182457dacad5SJay Sternberg if (size > bar_size - offset) 182557dacad5SJay Sternberg size = bar_size - offset; 182657dacad5SJay Sternberg 18270f238ff5SLogan Gunthorpe if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 18280f238ff5SLogan Gunthorpe dev_warn(dev->ctrl.device, 18290f238ff5SLogan Gunthorpe "failed to register the CMB\n"); 1830f65efd6dSChristoph Hellwig return; 18310f238ff5SLogan Gunthorpe } 18320f238ff5SLogan Gunthorpe 183357dacad5SJay Sternberg dev->cmb_size = size; 18340f238ff5SLogan Gunthorpe dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 18350f238ff5SLogan Gunthorpe 18360f238ff5SLogan Gunthorpe if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 18370f238ff5SLogan Gunthorpe (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 18380f238ff5SLogan Gunthorpe pci_p2pmem_publish(pdev, true); 1839f65efd6dSChristoph Hellwig 1840f65efd6dSChristoph Hellwig if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1841f65efd6dSChristoph Hellwig &dev_attr_cmb.attr, NULL)) 1842f65efd6dSChristoph Hellwig dev_warn(dev->ctrl.device, 1843f65efd6dSChristoph Hellwig "failed to add sysfs attribute for CMB\n"); 184457dacad5SJay Sternberg } 184557dacad5SJay Sternberg 184657dacad5SJay Sternberg static inline void nvme_release_cmb(struct nvme_dev *dev) 184757dacad5SJay Sternberg { 18480f238ff5SLogan Gunthorpe if (dev->cmb_size) { 1849f63572dfSJon Derrick sysfs_remove_file_from_group(&dev->ctrl.device->kobj, 1850f63572dfSJon Derrick &dev_attr_cmb.attr, NULL); 18510f238ff5SLogan Gunthorpe dev->cmb_size = 0; 1852f63572dfSJon Derrick } 185357dacad5SJay Sternberg } 185457dacad5SJay Sternberg 185587ad72a5SChristoph Hellwig static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 185657dacad5SJay Sternberg { 18574033f35dSChristoph Hellwig u64 dma_addr = dev->host_mem_descs_dma; 185887ad72a5SChristoph Hellwig struct nvme_command c; 185987ad72a5SChristoph Hellwig int ret; 186087ad72a5SChristoph Hellwig 186187ad72a5SChristoph Hellwig memset(&c, 0, sizeof(c)); 186287ad72a5SChristoph Hellwig c.features.opcode = nvme_admin_set_features; 186387ad72a5SChristoph Hellwig c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 186487ad72a5SChristoph Hellwig c.features.dword11 = cpu_to_le32(bits); 186587ad72a5SChristoph Hellwig c.features.dword12 = cpu_to_le32(dev->host_mem_size >> 186687ad72a5SChristoph Hellwig ilog2(dev->ctrl.page_size)); 186787ad72a5SChristoph Hellwig c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 186887ad72a5SChristoph Hellwig c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 186987ad72a5SChristoph Hellwig c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 187087ad72a5SChristoph Hellwig 187187ad72a5SChristoph Hellwig ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 187287ad72a5SChristoph Hellwig if (ret) { 187387ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 187487ad72a5SChristoph Hellwig "failed to set host mem (err %d, flags %#x).\n", 187587ad72a5SChristoph Hellwig ret, bits); 187687ad72a5SChristoph Hellwig } 187787ad72a5SChristoph Hellwig return ret; 187887ad72a5SChristoph Hellwig } 187987ad72a5SChristoph Hellwig 188087ad72a5SChristoph Hellwig static void nvme_free_host_mem(struct nvme_dev *dev) 188187ad72a5SChristoph Hellwig { 188287ad72a5SChristoph Hellwig int i; 188387ad72a5SChristoph Hellwig 188487ad72a5SChristoph Hellwig for (i = 0; i < dev->nr_host_mem_descs; i++) { 188587ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *desc 
= &dev->host_mem_descs[i]; 188687ad72a5SChristoph Hellwig size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; 188787ad72a5SChristoph Hellwig 188887ad72a5SChristoph Hellwig dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i], 188987ad72a5SChristoph Hellwig le64_to_cpu(desc->addr)); 189087ad72a5SChristoph Hellwig } 189187ad72a5SChristoph Hellwig 189287ad72a5SChristoph Hellwig kfree(dev->host_mem_desc_bufs); 189387ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = NULL; 18944033f35dSChristoph Hellwig dma_free_coherent(dev->dev, 18954033f35dSChristoph Hellwig dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), 18964033f35dSChristoph Hellwig dev->host_mem_descs, dev->host_mem_descs_dma); 189787ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 18987e5dd57eSMinwoo Im dev->nr_host_mem_descs = 0; 189987ad72a5SChristoph Hellwig } 190087ad72a5SChristoph Hellwig 190192dc6895SChristoph Hellwig static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, 190292dc6895SChristoph Hellwig u32 chunk_size) 190387ad72a5SChristoph Hellwig { 190487ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *descs; 190592dc6895SChristoph Hellwig u32 max_entries, len; 19064033f35dSChristoph Hellwig dma_addr_t descs_dma; 19072ee0e4edSDan Carpenter int i = 0; 190887ad72a5SChristoph Hellwig void **bufs; 19096fbcde66SMinwoo Im u64 size, tmp; 191087ad72a5SChristoph Hellwig 191187ad72a5SChristoph Hellwig tmp = (preferred + chunk_size - 1); 191287ad72a5SChristoph Hellwig do_div(tmp, chunk_size); 191387ad72a5SChristoph Hellwig max_entries = tmp; 1914044a9df1SChristoph Hellwig 1915044a9df1SChristoph Hellwig if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1916044a9df1SChristoph Hellwig max_entries = dev->ctrl.hmmaxd; 1917044a9df1SChristoph Hellwig 19184033f35dSChristoph Hellwig descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), 19194033f35dSChristoph Hellwig &descs_dma, GFP_KERNEL); 192087ad72a5SChristoph Hellwig if (!descs) 192187ad72a5SChristoph Hellwig goto out; 192287ad72a5SChristoph Hellwig 192387ad72a5SChristoph Hellwig bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 192487ad72a5SChristoph Hellwig if (!bufs) 192587ad72a5SChristoph Hellwig goto out_free_descs; 192687ad72a5SChristoph Hellwig 1927244a8fe4SMinwoo Im for (size = 0; size < preferred && i < max_entries; size += len) { 192887ad72a5SChristoph Hellwig dma_addr_t dma_addr; 192987ad72a5SChristoph Hellwig 193050cdb7c6SChristoph Hellwig len = min_t(u64, chunk_size, preferred - size); 193187ad72a5SChristoph Hellwig bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 193287ad72a5SChristoph Hellwig DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 193387ad72a5SChristoph Hellwig if (!bufs[i]) 193487ad72a5SChristoph Hellwig break; 193587ad72a5SChristoph Hellwig 193687ad72a5SChristoph Hellwig descs[i].addr = cpu_to_le64(dma_addr); 193787ad72a5SChristoph Hellwig descs[i].size = cpu_to_le32(len / dev->ctrl.page_size); 193887ad72a5SChristoph Hellwig i++; 193987ad72a5SChristoph Hellwig } 194087ad72a5SChristoph Hellwig 194192dc6895SChristoph Hellwig if (!size) 194287ad72a5SChristoph Hellwig goto out_free_bufs; 194387ad72a5SChristoph Hellwig 194487ad72a5SChristoph Hellwig dev->nr_host_mem_descs = i; 194587ad72a5SChristoph Hellwig dev->host_mem_size = size; 194687ad72a5SChristoph Hellwig dev->host_mem_descs = descs; 19474033f35dSChristoph Hellwig dev->host_mem_descs_dma = descs_dma; 194887ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = bufs; 194987ad72a5SChristoph Hellwig return 0; 195087ad72a5SChristoph 
Hellwig 195187ad72a5SChristoph Hellwig out_free_bufs: 195287ad72a5SChristoph Hellwig while (--i >= 0) { 195387ad72a5SChristoph Hellwig size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; 195487ad72a5SChristoph Hellwig 195587ad72a5SChristoph Hellwig dma_free_coherent(dev->dev, size, bufs[i], 195687ad72a5SChristoph Hellwig le64_to_cpu(descs[i].addr)); 195787ad72a5SChristoph Hellwig } 195887ad72a5SChristoph Hellwig 195987ad72a5SChristoph Hellwig kfree(bufs); 196087ad72a5SChristoph Hellwig out_free_descs: 19614033f35dSChristoph Hellwig dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, 19624033f35dSChristoph Hellwig descs_dma); 196387ad72a5SChristoph Hellwig out: 196487ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 196587ad72a5SChristoph Hellwig return -ENOMEM; 196687ad72a5SChristoph Hellwig } 196787ad72a5SChristoph Hellwig 196892dc6895SChristoph Hellwig static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 196992dc6895SChristoph Hellwig { 197092dc6895SChristoph Hellwig u32 chunk_size; 197192dc6895SChristoph Hellwig 197292dc6895SChristoph Hellwig /* start big and work our way down */ 197330f92d62SAkinobu Mita for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 1974044a9df1SChristoph Hellwig chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 197592dc6895SChristoph Hellwig chunk_size /= 2) { 197692dc6895SChristoph Hellwig if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { 197792dc6895SChristoph Hellwig if (!min || dev->host_mem_size >= min) 197892dc6895SChristoph Hellwig return 0; 197992dc6895SChristoph Hellwig nvme_free_host_mem(dev); 198092dc6895SChristoph Hellwig } 198192dc6895SChristoph Hellwig } 198292dc6895SChristoph Hellwig 198392dc6895SChristoph Hellwig return -ENOMEM; 198492dc6895SChristoph Hellwig } 198592dc6895SChristoph Hellwig 19869620cfbaSChristoph Hellwig static int nvme_setup_host_mem(struct nvme_dev *dev) 198787ad72a5SChristoph Hellwig { 198887ad72a5SChristoph Hellwig u64 max = (u64)max_host_mem_size_mb * SZ_1M; 198987ad72a5SChristoph Hellwig u64 preferred = (u64)dev->ctrl.hmpre * 4096; 199087ad72a5SChristoph Hellwig u64 min = (u64)dev->ctrl.hmmin * 4096; 199187ad72a5SChristoph Hellwig u32 enable_bits = NVME_HOST_MEM_ENABLE; 19926fbcde66SMinwoo Im int ret; 199387ad72a5SChristoph Hellwig 199487ad72a5SChristoph Hellwig preferred = min(preferred, max); 199587ad72a5SChristoph Hellwig if (min > max) { 199687ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 199787ad72a5SChristoph Hellwig "min host memory (%lld MiB) above limit (%d MiB).\n", 199887ad72a5SChristoph Hellwig min >> ilog2(SZ_1M), max_host_mem_size_mb); 199987ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 20009620cfbaSChristoph Hellwig return 0; 200187ad72a5SChristoph Hellwig } 200287ad72a5SChristoph Hellwig 200387ad72a5SChristoph Hellwig /* 200487ad72a5SChristoph Hellwig * If we already have a buffer allocated check if we can reuse it. 
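 * (NVME_HOST_MEM_RETURN tells the controller that the descriptors being
 * handed back describe the same host memory it was already using before,
 * so it may still contain data the controller stored there earlier.)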
200587ad72a5SChristoph Hellwig */ 200687ad72a5SChristoph Hellwig if (dev->host_mem_descs) { 200787ad72a5SChristoph Hellwig if (dev->host_mem_size >= min) 200887ad72a5SChristoph Hellwig enable_bits |= NVME_HOST_MEM_RETURN; 200987ad72a5SChristoph Hellwig else 201087ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 201187ad72a5SChristoph Hellwig } 201287ad72a5SChristoph Hellwig 201387ad72a5SChristoph Hellwig if (!dev->host_mem_descs) { 201492dc6895SChristoph Hellwig if (nvme_alloc_host_mem(dev, min, preferred)) { 201592dc6895SChristoph Hellwig dev_warn(dev->ctrl.device, 201692dc6895SChristoph Hellwig "failed to allocate host memory buffer.\n"); 20179620cfbaSChristoph Hellwig return 0; /* controller must work without HMB */ 201887ad72a5SChristoph Hellwig } 201987ad72a5SChristoph Hellwig 202092dc6895SChristoph Hellwig dev_info(dev->ctrl.device, 202192dc6895SChristoph Hellwig "allocated %lld MiB host memory buffer.\n", 202292dc6895SChristoph Hellwig dev->host_mem_size >> ilog2(SZ_1M)); 202392dc6895SChristoph Hellwig } 202492dc6895SChristoph Hellwig 20259620cfbaSChristoph Hellwig ret = nvme_set_host_mem(dev, enable_bits); 20269620cfbaSChristoph Hellwig if (ret) 202787ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 20289620cfbaSChristoph Hellwig return ret; 202957dacad5SJay Sternberg } 203057dacad5SJay Sternberg 20316451fe73SJens Axboe static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) 20323b6592f7SJens Axboe { 20333b6592f7SJens Axboe unsigned int this_w_queues = write_queues; 20343b6592f7SJens Axboe 20353b6592f7SJens Axboe /* 20363b6592f7SJens Axboe * Setup read/write queue split 20373b6592f7SJens Axboe */ 20386451fe73SJens Axboe if (irq_queues == 1) { 2039e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2040e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_READ] = 0; 20413b6592f7SJens Axboe return; 20423b6592f7SJens Axboe } 20433b6592f7SJens Axboe 20443b6592f7SJens Axboe /* 20453b6592f7SJens Axboe * If 'write_queues' is set, ensure it leaves room for at least 20463b6592f7SJens Axboe * one read queue 20473b6592f7SJens Axboe */ 20486451fe73SJens Axboe if (this_w_queues >= irq_queues) 20496451fe73SJens Axboe this_w_queues = irq_queues - 1; 20503b6592f7SJens Axboe 20513b6592f7SJens Axboe /* 20523b6592f7SJens Axboe * If 'write_queues' is set to zero, reads and writes will share 20533b6592f7SJens Axboe * a queue set. 
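 * For example, with 8 interrupt-driven I/O queues and write_queues=2 the
 * split is 2 default (write) queues and 6 read queues; with write_queues=0
 * all 8 land in the default set.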
20543b6592f7SJens Axboe */ 20553b6592f7SJens Axboe if (!this_w_queues) { 20566451fe73SJens Axboe dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues; 2057e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_READ] = 0; 20583b6592f7SJens Axboe } else { 2059e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; 20606451fe73SJens Axboe dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues; 20613b6592f7SJens Axboe } 20623b6592f7SJens Axboe } 20633b6592f7SJens Axboe 20646451fe73SJens Axboe static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 20653b6592f7SJens Axboe { 20663b6592f7SJens Axboe struct pci_dev *pdev = to_pci_dev(dev->dev); 20673b6592f7SJens Axboe int irq_sets[2]; 20683b6592f7SJens Axboe struct irq_affinity affd = { 20693b6592f7SJens Axboe .pre_vectors = 1, 20703b6592f7SJens Axboe .nr_sets = ARRAY_SIZE(irq_sets), 20713b6592f7SJens Axboe .sets = irq_sets, 20723b6592f7SJens Axboe }; 207330e06628SJens Axboe int result = 0; 20746451fe73SJens Axboe unsigned int irq_queues, this_p_queues; 20756451fe73SJens Axboe 20766451fe73SJens Axboe /* 20776451fe73SJens Axboe * Poll queues don't need interrupts, but we need at least one IO 20786451fe73SJens Axboe * queue left over for non-polled IO. 20796451fe73SJens Axboe */ 20806451fe73SJens Axboe this_p_queues = poll_queues; 20816451fe73SJens Axboe if (this_p_queues >= nr_io_queues) { 20826451fe73SJens Axboe this_p_queues = nr_io_queues - 1; 20836451fe73SJens Axboe irq_queues = 1; 20846451fe73SJens Axboe } else { 20856451fe73SJens Axboe irq_queues = nr_io_queues - this_p_queues; 20866451fe73SJens Axboe } 20876451fe73SJens Axboe dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; 20883b6592f7SJens Axboe 20893b6592f7SJens Axboe /* 20903b6592f7SJens Axboe * For irq sets, we have to ask for minvec == maxvec. This passes 20913b6592f7SJens Axboe * any reduction back to us, so we can adjust our queue counts and 20923b6592f7SJens Axboe * IRQ vector needs. 20933b6592f7SJens Axboe */ 20943b6592f7SJens Axboe do { 20956451fe73SJens Axboe nvme_calc_io_queues(dev, irq_queues); 2096e20ba6e1SChristoph Hellwig irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT]; 2097e20ba6e1SChristoph Hellwig irq_sets[1] = dev->io_queues[HCTX_TYPE_READ]; 20983b6592f7SJens Axboe if (!irq_sets[1]) 20993b6592f7SJens Axboe affd.nr_sets = 1; 21003b6592f7SJens Axboe 21013b6592f7SJens Axboe /* 2102db29eb05SJens Axboe * If we got a failure and we're down to asking for just 2103db29eb05SJens Axboe * 1 + 1 queues, just ask for a single vector. We'll share 2104db29eb05SJens Axboe * that between the single IO queue and the admin queue. 21053b6592f7SJens Axboe */ 21066451fe73SJens Axboe if (result >= 0 && irq_queues > 1) 21076451fe73SJens Axboe irq_queues = irq_sets[0] + irq_sets[1] + 1; 21083b6592f7SJens Axboe 21096451fe73SJens Axboe result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, 21106451fe73SJens Axboe irq_queues, 21113b6592f7SJens Axboe PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); 21123b6592f7SJens Axboe 21133b6592f7SJens Axboe /* 2114db29eb05SJens Axboe * Need to reduce our vec counts. If we get ENOSPC, the 2115db29eb05SJens Axboe * platform should support mulitple vecs, we just need 2116db29eb05SJens Axboe * to decrease our ask. If we get EINVAL, the platform 2117db29eb05SJens Axboe * likely does not. Back down to ask for just one vector. 
21183b6592f7SJens Axboe */ 21193b6592f7SJens Axboe if (result == -ENOSPC) { 21206451fe73SJens Axboe irq_queues--; 21216451fe73SJens Axboe if (!irq_queues) 21223b6592f7SJens Axboe return result; 21233b6592f7SJens Axboe continue; 2124db29eb05SJens Axboe } else if (result == -EINVAL) { 21256451fe73SJens Axboe irq_queues = 1; 2126db29eb05SJens Axboe continue; 21273b6592f7SJens Axboe } else if (result <= 0) 21283b6592f7SJens Axboe return -EIO; 21293b6592f7SJens Axboe break; 21303b6592f7SJens Axboe } while (1); 21313b6592f7SJens Axboe 21323b6592f7SJens Axboe return result; 21333b6592f7SJens Axboe } 21343b6592f7SJens Axboe 213557dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev) 213657dacad5SJay Sternberg { 2137147b27e4SSagi Grimberg struct nvme_queue *adminq = &dev->queues[0]; 213857dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 213997f6ef64SXu Yu int result, nr_io_queues; 214097f6ef64SXu Yu unsigned long size; 214157dacad5SJay Sternberg 21423b6592f7SJens Axboe nr_io_queues = max_io_queues(); 21439a0be7abSChristoph Hellwig result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 21449a0be7abSChristoph Hellwig if (result < 0) 214557dacad5SJay Sternberg return result; 21469a0be7abSChristoph Hellwig 2147f5fa90dcSChristoph Hellwig if (nr_io_queues == 0) 2148a5229050SKeith Busch return 0; 214957dacad5SJay Sternberg 21504e224106SChristoph Hellwig clear_bit(NVMEQ_ENABLED, &adminq->flags); 21514e224106SChristoph Hellwig 21520f238ff5SLogan Gunthorpe if (dev->cmb_use_sqes) { 215357dacad5SJay Sternberg result = nvme_cmb_qdepth(dev, nr_io_queues, 215457dacad5SJay Sternberg sizeof(struct nvme_command)); 215557dacad5SJay Sternberg if (result > 0) 215657dacad5SJay Sternberg dev->q_depth = result; 215757dacad5SJay Sternberg else 21580f238ff5SLogan Gunthorpe dev->cmb_use_sqes = false; 215957dacad5SJay Sternberg } 216057dacad5SJay Sternberg 216157dacad5SJay Sternberg do { 216297f6ef64SXu Yu size = db_bar_size(dev, nr_io_queues); 216397f6ef64SXu Yu result = nvme_remap_bar(dev, size); 216497f6ef64SXu Yu if (!result) 216557dacad5SJay Sternberg break; 216657dacad5SJay Sternberg if (!--nr_io_queues) 216757dacad5SJay Sternberg return -ENOMEM; 216857dacad5SJay Sternberg } while (1); 216957dacad5SJay Sternberg adminq->q_db = dev->dbs; 217057dacad5SJay Sternberg 217157dacad5SJay Sternberg /* Deregister the admin queue's interrupt */ 21720ff199cbSChristoph Hellwig pci_free_irq(pdev, 0, adminq); 217357dacad5SJay Sternberg 217457dacad5SJay Sternberg /* 217557dacad5SJay Sternberg * If we enable msix early due to not intx, disable it again before 217657dacad5SJay Sternberg * setting up the full range we need. 
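 * (nvme_pci_enable() grabbed a single vector with pci_alloc_irq_vectors();
 * releasing it here lets the affinity-managed allocation in
 * nvme_setup_irqs() claim the full range.)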
217757dacad5SJay Sternberg */ 2178dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 21793b6592f7SJens Axboe 21803b6592f7SJens Axboe result = nvme_setup_irqs(dev, nr_io_queues); 218122b55601SKeith Busch if (result <= 0) 2182dca51e78SChristoph Hellwig return -EIO; 21833b6592f7SJens Axboe 218422b55601SKeith Busch dev->num_vecs = result; 21854b04cc6aSJens Axboe result = max(result - 1, 1); 2186e20ba6e1SChristoph Hellwig dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 218757dacad5SJay Sternberg 2188e20ba6e1SChristoph Hellwig dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 2189e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_DEFAULT], 2190e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_READ], 2191e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_POLL]); 21923b6592f7SJens Axboe 219357dacad5SJay Sternberg /* 219457dacad5SJay Sternberg * Should investigate if there's a performance win from allocating 219557dacad5SJay Sternberg * more queues than interrupt vectors; it might allow the submission 219657dacad5SJay Sternberg * path to scale better, even if the receive path is limited by the 219757dacad5SJay Sternberg * number of interrupts. 219857dacad5SJay Sternberg */ 219957dacad5SJay Sternberg 2200dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 220157dacad5SJay Sternberg if (result) { 220257dacad5SJay Sternberg adminq->cq_vector = -1; 2203d4875622SKeith Busch return result; 220457dacad5SJay Sternberg } 22054e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &adminq->flags); 2206749941f2SChristoph Hellwig return nvme_create_io_queues(dev); 220757dacad5SJay Sternberg } 220857dacad5SJay Sternberg 22092a842acaSChristoph Hellwig static void nvme_del_queue_end(struct request *req, blk_status_t error) 2210db3cbfffSKeith Busch { 2211db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2212db3cbfffSKeith Busch 2213db3cbfffSKeith Busch blk_mq_free_request(req); 2214d1ed6aa1SChristoph Hellwig complete(&nvmeq->delete_done); 2215db3cbfffSKeith Busch } 2216db3cbfffSKeith Busch 22172a842acaSChristoph Hellwig static void nvme_del_cq_end(struct request *req, blk_status_t error) 2218db3cbfffSKeith Busch { 2219db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2220db3cbfffSKeith Busch 2221d1ed6aa1SChristoph Hellwig if (error) 2222d1ed6aa1SChristoph Hellwig set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2223db3cbfffSKeith Busch 2224db3cbfffSKeith Busch nvme_del_queue_end(req, error); 2225db3cbfffSKeith Busch } 2226db3cbfffSKeith Busch 2227db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2228db3cbfffSKeith Busch { 2229db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2230db3cbfffSKeith Busch struct request *req; 2231db3cbfffSKeith Busch struct nvme_command cmd; 2232db3cbfffSKeith Busch 2233db3cbfffSKeith Busch memset(&cmd, 0, sizeof(cmd)); 2234db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 2235db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2236db3cbfffSKeith Busch 2237eb71f435SChristoph Hellwig req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 2238db3cbfffSKeith Busch if (IS_ERR(req)) 2239db3cbfffSKeith Busch return PTR_ERR(req); 2240db3cbfffSKeith Busch 2241db3cbfffSKeith Busch req->timeout = ADMIN_TIMEOUT; 2242db3cbfffSKeith Busch req->end_io_data = nvmeq; 2243db3cbfffSKeith Busch 2244d1ed6aa1SChristoph Hellwig init_completion(&nvmeq->delete_done); 2245db3cbfffSKeith Busch blk_execute_rq_nowait(q, NULL, req, false, 2246db3cbfffSKeith 
Busch opcode == nvme_admin_delete_cq ? 2247db3cbfffSKeith Busch nvme_del_cq_end : nvme_del_queue_end); 2248db3cbfffSKeith Busch return 0; 2249db3cbfffSKeith Busch } 2250db3cbfffSKeith Busch 22515271edd4SChristoph Hellwig static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2252db3cbfffSKeith Busch { 22535271edd4SChristoph Hellwig int nr_queues = dev->online_queues - 1, sent = 0; 2254db3cbfffSKeith Busch unsigned long timeout; 2255db3cbfffSKeith Busch 2256db3cbfffSKeith Busch retry: 2257db3cbfffSKeith Busch timeout = ADMIN_TIMEOUT; 22585271edd4SChristoph Hellwig while (nr_queues > 0) { 22595271edd4SChristoph Hellwig if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2260db3cbfffSKeith Busch break; 22615271edd4SChristoph Hellwig nr_queues--; 22625271edd4SChristoph Hellwig sent++; 22635271edd4SChristoph Hellwig } 2264d1ed6aa1SChristoph Hellwig while (sent) { 2265d1ed6aa1SChristoph Hellwig struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2266d1ed6aa1SChristoph Hellwig 2267d1ed6aa1SChristoph Hellwig timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 22685271edd4SChristoph Hellwig timeout); 2269db3cbfffSKeith Busch if (timeout == 0) 22705271edd4SChristoph Hellwig return false; 2271d1ed6aa1SChristoph Hellwig 2272d1ed6aa1SChristoph Hellwig /* handle any remaining CQEs */ 2273d1ed6aa1SChristoph Hellwig if (opcode == nvme_admin_delete_cq && 2274d1ed6aa1SChristoph Hellwig !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags)) 2275d1ed6aa1SChristoph Hellwig nvme_poll_irqdisable(nvmeq, -1); 2276d1ed6aa1SChristoph Hellwig 2277d1ed6aa1SChristoph Hellwig sent--; 22785271edd4SChristoph Hellwig if (nr_queues) 2279db3cbfffSKeith Busch goto retry; 2280db3cbfffSKeith Busch } 22815271edd4SChristoph Hellwig return true; 2282db3cbfffSKeith Busch } 2283db3cbfffSKeith Busch 228457dacad5SJay Sternberg /* 22852b1b7e78SJianchao Wang * return error value only when tagset allocation failed 228657dacad5SJay Sternberg */ 228757dacad5SJay Sternberg static int nvme_dev_add(struct nvme_dev *dev) 228857dacad5SJay Sternberg { 22892b1b7e78SJianchao Wang int ret; 22902b1b7e78SJianchao Wang 22915bae7f73SChristoph Hellwig if (!dev->ctrl.tagset) { 2292c6d962aeSChristoph Hellwig dev->tagset.ops = &nvme_mq_ops; 229357dacad5SJay Sternberg dev->tagset.nr_hw_queues = dev->online_queues - 1; 2294ed92ad37SChristoph Hellwig dev->tagset.nr_maps = 2; /* default + read */ 2295ed92ad37SChristoph Hellwig if (dev->io_queues[HCTX_TYPE_POLL]) 2296ed92ad37SChristoph Hellwig dev->tagset.nr_maps++; 2297e20ba6e1SChristoph Hellwig dev->tagset.nr_maps = HCTX_MAX_TYPES; 229857dacad5SJay Sternberg dev->tagset.timeout = NVME_IO_TIMEOUT; 229957dacad5SJay Sternberg dev->tagset.numa_node = dev_to_node(dev->dev); 230057dacad5SJay Sternberg dev->tagset.queue_depth = 230157dacad5SJay Sternberg min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; 2302a7a7cbe3SChaitanya Kulkarni dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false); 2303a7a7cbe3SChaitanya Kulkarni if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) { 2304a7a7cbe3SChaitanya Kulkarni dev->tagset.cmd_size = max(dev->tagset.cmd_size, 2305a7a7cbe3SChaitanya Kulkarni nvme_pci_cmd_size(dev, true)); 2306a7a7cbe3SChaitanya Kulkarni } 230757dacad5SJay Sternberg dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 230857dacad5SJay Sternberg dev->tagset.driver_data = dev; 230957dacad5SJay Sternberg 23102b1b7e78SJianchao Wang ret = blk_mq_alloc_tag_set(&dev->tagset); 23112b1b7e78SJianchao Wang if (ret) { 23122b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 23132b1b7e78SJianchao Wang 
"IO queues tagset allocation failed %d\n", ret); 23142b1b7e78SJianchao Wang return ret; 23152b1b7e78SJianchao Wang } 23165bae7f73SChristoph Hellwig dev->ctrl.tagset = &dev->tagset; 2317f9f38e33SHelen Koike 2318f9f38e33SHelen Koike nvme_dbbuf_set(dev); 2319949928c1SKeith Busch } else { 2320949928c1SKeith Busch blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 2321949928c1SKeith Busch 2322949928c1SKeith Busch /* Free previously allocated queues that are no longer usable */ 2323949928c1SKeith Busch nvme_free_queues(dev, dev->online_queues); 232457dacad5SJay Sternberg } 2325949928c1SKeith Busch 232657dacad5SJay Sternberg return 0; 232757dacad5SJay Sternberg } 232857dacad5SJay Sternberg 2329b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 233057dacad5SJay Sternberg { 2331b00a726aSKeith Busch int result = -ENOMEM; 233257dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 233357dacad5SJay Sternberg 233457dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 233557dacad5SJay Sternberg return result; 233657dacad5SJay Sternberg 233757dacad5SJay Sternberg pci_set_master(pdev); 233857dacad5SJay Sternberg 233957dacad5SJay Sternberg if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 234057dacad5SJay Sternberg dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) 234157dacad5SJay Sternberg goto disable; 234257dacad5SJay Sternberg 23437a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 234457dacad5SJay Sternberg result = -ENODEV; 2345b00a726aSKeith Busch goto disable; 234657dacad5SJay Sternberg } 234757dacad5SJay Sternberg 234857dacad5SJay Sternberg /* 2349a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 2350a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2351a5229050SKeith Busch * adjust this later. 235257dacad5SJay Sternberg */ 2353dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2354dca51e78SChristoph Hellwig if (result < 0) 2355dca51e78SChristoph Hellwig return result; 235657dacad5SJay Sternberg 235720d0dfe6SSagi Grimberg dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 23587a67cbeaSChristoph Hellwig 235920d0dfe6SSagi Grimberg dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2360b27c1e68Sweiping zhang io_queue_depth); 236120d0dfe6SSagi Grimberg dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 23627a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 23631f390c1fSStephan Günther 23641f390c1fSStephan Günther /* 23651f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 23661f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 23671f390c1fSStephan Günther */ 23681f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 23691f390c1fSStephan Günther dev->q_depth = 2; 23709bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 23719bdcfb10SChristoph Hellwig "set queue depth=%u to work around controller resets\n", 23721f390c1fSStephan Günther dev->q_depth); 2373d554b5e1SMartin K. Petersen } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2374d554b5e1SMartin K. Petersen (pdev->device == 0xa821 || pdev->device == 0xa822) && 237520d0dfe6SSagi Grimberg NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2376d554b5e1SMartin K. Petersen dev->q_depth = 64; 2377d554b5e1SMartin K. Petersen dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2378d554b5e1SMartin K. 
Petersen "set queue depth=%u\n", dev->q_depth); 23791f390c1fSStephan Günther } 23801f390c1fSStephan Günther 2381f65efd6dSChristoph Hellwig nvme_map_cmb(dev); 2382202021c1SStephen Bates 2383a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 2384a0a3408eSKeith Busch pci_save_state(pdev); 238557dacad5SJay Sternberg return 0; 238657dacad5SJay Sternberg 238757dacad5SJay Sternberg disable: 238857dacad5SJay Sternberg pci_disable_device(pdev); 238957dacad5SJay Sternberg return result; 239057dacad5SJay Sternberg } 239157dacad5SJay Sternberg 239257dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 239357dacad5SJay Sternberg { 2394b00a726aSKeith Busch if (dev->bar) 2395b00a726aSKeith Busch iounmap(dev->bar); 2396a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 2397b00a726aSKeith Busch } 2398b00a726aSKeith Busch 2399b00a726aSKeith Busch static void nvme_pci_disable(struct nvme_dev *dev) 2400b00a726aSKeith Busch { 240157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 240257dacad5SJay Sternberg 2403dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 240457dacad5SJay Sternberg 2405a0a3408eSKeith Busch if (pci_is_enabled(pdev)) { 2406a0a3408eSKeith Busch pci_disable_pcie_error_reporting(pdev); 240757dacad5SJay Sternberg pci_disable_device(pdev); 240857dacad5SJay Sternberg } 2409a0a3408eSKeith Busch } 241057dacad5SJay Sternberg 2411a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 241257dacad5SJay Sternberg { 2413ee9aebb2SKeith Busch int i; 2414302ad8ccSKeith Busch bool dead = true; 2415302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 241657dacad5SJay Sternberg 241777bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 2418302ad8ccSKeith Busch if (pci_is_enabled(pdev)) { 2419302ad8ccSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 2420302ad8ccSKeith Busch 2421ebef7368SKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE || 2422ebef7368SKeith Busch dev->ctrl.state == NVME_CTRL_RESETTING) 2423302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 2424302ad8ccSKeith Busch dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 2425302ad8ccSKeith Busch pdev->error_state != pci_channel_io_normal); 242657dacad5SJay Sternberg } 2427c21377f8SGabriel Krisman Bertazi 2428302ad8ccSKeith Busch /* 2429302ad8ccSKeith Busch * Give the controller a chance to complete all entered requests if 2430302ad8ccSKeith Busch * doing a safe shutdown. 
2431302ad8ccSKeith Busch */ 243287ad72a5SChristoph Hellwig if (!dead) { 243387ad72a5SChristoph Hellwig if (shutdown) 2434302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 24359a915a5bSJianchao Wang } 243687ad72a5SChristoph Hellwig 24379a915a5bSJianchao Wang nvme_stop_queues(&dev->ctrl); 24389a915a5bSJianchao Wang 243964ee0ac0SKeith Busch if (!dead && dev->ctrl.queue_count > 0) { 24405271edd4SChristoph Hellwig if (nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 24415271edd4SChristoph Hellwig nvme_disable_io_queues(dev, nvme_admin_delete_cq); 2442a5cdb68cSKeith Busch nvme_disable_admin_queue(dev, shutdown); 244357dacad5SJay Sternberg } 2444ee9aebb2SKeith Busch for (i = dev->ctrl.queue_count - 1; i >= 0; i--) 2445ee9aebb2SKeith Busch nvme_suspend_queue(&dev->queues[i]); 2446ee9aebb2SKeith Busch 2447b00a726aSKeith Busch nvme_pci_disable(dev); 244857dacad5SJay Sternberg 2449e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 2450e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); 2451302ad8ccSKeith Busch 2452302ad8ccSKeith Busch /* 2453302ad8ccSKeith Busch * The driver will not be starting up queues again if shutting down so 2454302ad8ccSKeith Busch * must flush all entered requests to their failed completion to avoid 2455302ad8ccSKeith Busch * deadlocking blk-mq hot-cpu notifier. 2456302ad8ccSKeith Busch */ 2457302ad8ccSKeith Busch if (shutdown) 2458302ad8ccSKeith Busch nvme_start_queues(&dev->ctrl); 245977bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 246057dacad5SJay Sternberg } 246157dacad5SJay Sternberg 246257dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 246357dacad5SJay Sternberg { 246457dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 246557dacad5SJay Sternberg PAGE_SIZE, PAGE_SIZE, 0); 246657dacad5SJay Sternberg if (!dev->prp_page_pool) 246757dacad5SJay Sternberg return -ENOMEM; 246857dacad5SJay Sternberg 246957dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 247057dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 247157dacad5SJay Sternberg 256, 256, 0); 247257dacad5SJay Sternberg if (!dev->prp_small_pool) { 247357dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 247457dacad5SJay Sternberg return -ENOMEM; 247557dacad5SJay Sternberg } 247657dacad5SJay Sternberg return 0; 247757dacad5SJay Sternberg } 247857dacad5SJay Sternberg 247957dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 248057dacad5SJay Sternberg { 248157dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 248257dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 248357dacad5SJay Sternberg } 248457dacad5SJay Sternberg 24851673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 248657dacad5SJay Sternberg { 24871673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 248857dacad5SJay Sternberg 2489f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 249057dacad5SJay Sternberg put_device(dev->dev); 249157dacad5SJay Sternberg if (dev->tagset.tags) 249257dacad5SJay Sternberg blk_mq_free_tag_set(&dev->tagset); 24931c63dc66SChristoph Hellwig if (dev->ctrl.admin_q) 24941c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 249557dacad5SJay Sternberg kfree(dev->queues); 2496e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2497943e942eSJens Axboe mempool_destroy(dev->iod_mempool); 249857dacad5SJay Sternberg 
kfree(dev); 249957dacad5SJay Sternberg } 250057dacad5SJay Sternberg 2501f58944e2SKeith Busch static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status) 2502f58944e2SKeith Busch { 2503237045fcSLinus Torvalds dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status); 2504f58944e2SKeith Busch 2505d22524a4SChristoph Hellwig nvme_get_ctrl(&dev->ctrl); 250669d9a99cSKeith Busch nvme_dev_disable(dev, false); 25079f9cafc1SJianchao Wang nvme_kill_queues(&dev->ctrl); 250803e0f3a6SMing Lei if (!queue_work(nvme_wq, &dev->remove_work)) 2509f58944e2SKeith Busch nvme_put_ctrl(&dev->ctrl); 2510f58944e2SKeith Busch } 2511f58944e2SKeith Busch 2512fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 251357dacad5SJay Sternberg { 2514d86c4d8eSChristoph Hellwig struct nvme_dev *dev = 2515d86c4d8eSChristoph Hellwig container_of(work, struct nvme_dev, ctrl.reset_work); 2516a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2517f58944e2SKeith Busch int result = -ENODEV; 25182b1b7e78SJianchao Wang enum nvme_ctrl_state new_state = NVME_CTRL_LIVE; 251957dacad5SJay Sternberg 252082b057caSRakesh Pandit if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) 2521fd634f41SChristoph Hellwig goto out; 2522fd634f41SChristoph Hellwig 2523fd634f41SChristoph Hellwig /* 2524fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 2525fd634f41SChristoph Hellwig * moving on. 2526fd634f41SChristoph Hellwig */ 2527b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2528a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2529fd634f41SChristoph Hellwig 2530ad70062cSJianchao Wang /* 2531ad6a0a52SMax Gurtovoy * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 2532ad70062cSJianchao Wang * initializing procedure here. 2533ad70062cSJianchao Wang */ 2534ad6a0a52SMax Gurtovoy if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 2535ad70062cSJianchao Wang dev_warn(dev->ctrl.device, 2536ad6a0a52SMax Gurtovoy "failed to mark controller CONNECTING\n"); 2537ad70062cSJianchao Wang goto out; 2538ad70062cSJianchao Wang } 2539ad70062cSJianchao Wang 2540b00a726aSKeith Busch result = nvme_pci_enable(dev); 254157dacad5SJay Sternberg if (result) 254257dacad5SJay Sternberg goto out; 254357dacad5SJay Sternberg 254401ad0990SSagi Grimberg result = nvme_pci_configure_admin_queue(dev); 254557dacad5SJay Sternberg if (result) 2546f58944e2SKeith Busch goto out; 254757dacad5SJay Sternberg 254857dacad5SJay Sternberg result = nvme_alloc_admin_tags(dev); 254957dacad5SJay Sternberg if (result) 2550f58944e2SKeith Busch goto out; 255157dacad5SJay Sternberg 2552943e942eSJens Axboe /* 2553943e942eSJens Axboe * Limit the max command size to prevent iod->sg allocations going 2554943e942eSJens Axboe * over a single page. 
2555943e942eSJens Axboe */ 2556943e942eSJens Axboe dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; 2557943e942eSJens Axboe dev->ctrl.max_segments = NVME_MAX_SEGS; 2558943e942eSJens Axboe 2559ce4541f4SChristoph Hellwig result = nvme_init_identify(&dev->ctrl); 2560ce4541f4SChristoph Hellwig if (result) 2561f58944e2SKeith Busch goto out; 2562ce4541f4SChristoph Hellwig 2563e286bcfcSScott Bauer if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { 2564e286bcfcSScott Bauer if (!dev->ctrl.opal_dev) 25654f1244c8SChristoph Hellwig dev->ctrl.opal_dev = 25664f1244c8SChristoph Hellwig init_opal_dev(&dev->ctrl, &nvme_sec_submit); 2567e286bcfcSScott Bauer else if (was_suspend) 25684f1244c8SChristoph Hellwig opal_unlock_from_suspend(dev->ctrl.opal_dev); 2569e286bcfcSScott Bauer } else { 2570e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2571e286bcfcSScott Bauer dev->ctrl.opal_dev = NULL; 2572e286bcfcSScott Bauer } 2573a98e58e5SScott Bauer 2574f9f38e33SHelen Koike if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { 2575f9f38e33SHelen Koike result = nvme_dbbuf_dma_alloc(dev); 2576f9f38e33SHelen Koike if (result) 2577f9f38e33SHelen Koike dev_warn(dev->dev, 2578f9f38e33SHelen Koike "unable to allocate dma for dbbuf\n"); 2579f9f38e33SHelen Koike } 2580f9f38e33SHelen Koike 25819620cfbaSChristoph Hellwig if (dev->ctrl.hmpre) { 25829620cfbaSChristoph Hellwig result = nvme_setup_host_mem(dev); 25839620cfbaSChristoph Hellwig if (result < 0) 25849620cfbaSChristoph Hellwig goto out; 25859620cfbaSChristoph Hellwig } 258687ad72a5SChristoph Hellwig 258757dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 258857dacad5SJay Sternberg if (result) 2589f58944e2SKeith Busch goto out; 259057dacad5SJay Sternberg 259121f033f7SKeith Busch /* 259257dacad5SJay Sternberg * Keep the controller around but remove all namespaces if we don't have 259357dacad5SJay Sternberg * any working I/O queue. 259457dacad5SJay Sternberg */ 259557dacad5SJay Sternberg if (dev->online_queues < 2) { 25961b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, "IO queues not created\n"); 25973b24774eSKeith Busch nvme_kill_queues(&dev->ctrl); 25985bae7f73SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 25992b1b7e78SJianchao Wang new_state = NVME_CTRL_ADMIN_ONLY; 260057dacad5SJay Sternberg } else { 260125646264SKeith Busch nvme_start_queues(&dev->ctrl); 2602302ad8ccSKeith Busch nvme_wait_freeze(&dev->ctrl); 26032b1b7e78SJianchao Wang /* hit this only when allocate tagset fails */ 26042b1b7e78SJianchao Wang if (nvme_dev_add(dev)) 26052b1b7e78SJianchao Wang new_state = NVME_CTRL_ADMIN_ONLY; 2606302ad8ccSKeith Busch nvme_unfreeze(&dev->ctrl); 260757dacad5SJay Sternberg } 260857dacad5SJay Sternberg 26092b1b7e78SJianchao Wang /* 26102b1b7e78SJianchao Wang * If only admin queue live, keep it to do further investigation or 26112b1b7e78SJianchao Wang * recovery. 
26122b1b7e78SJianchao Wang */ 26132b1b7e78SJianchao Wang if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) { 26142b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 26152b1b7e78SJianchao Wang "failed to mark controller state %d\n", new_state); 2616bb8d261eSChristoph Hellwig goto out; 2617bb8d261eSChristoph Hellwig } 261892911a55SChristoph Hellwig 2619d09f2b45SSagi Grimberg nvme_start_ctrl(&dev->ctrl); 262057dacad5SJay Sternberg return; 262157dacad5SJay Sternberg 262257dacad5SJay Sternberg out: 2623f58944e2SKeith Busch nvme_remove_dead_ctrl(dev, result); 262457dacad5SJay Sternberg } 262557dacad5SJay Sternberg 26265c8809e6SChristoph Hellwig static void nvme_remove_dead_ctrl_work(struct work_struct *work) 262757dacad5SJay Sternberg { 26285c8809e6SChristoph Hellwig struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 262957dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 263057dacad5SJay Sternberg 263157dacad5SJay Sternberg if (pci_get_drvdata(pdev)) 2632921920abSKeith Busch device_release_driver(&pdev->dev); 26331673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 263457dacad5SJay Sternberg } 263557dacad5SJay Sternberg 26361c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 263757dacad5SJay Sternberg { 26381c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 26391c63dc66SChristoph Hellwig return 0; 264057dacad5SJay Sternberg } 26411c63dc66SChristoph Hellwig 26425fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 26435fd4ce1bSChristoph Hellwig { 26445fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 26455fd4ce1bSChristoph Hellwig return 0; 26465fd4ce1bSChristoph Hellwig } 26475fd4ce1bSChristoph Hellwig 26487fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 26497fd8930fSChristoph Hellwig { 26507fd8930fSChristoph Hellwig *val = readq(to_nvme_dev(ctrl)->bar + off); 26517fd8930fSChristoph Hellwig return 0; 26527fd8930fSChristoph Hellwig } 26537fd8930fSChristoph Hellwig 265497c12223SKeith Busch static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 265597c12223SKeith Busch { 265697c12223SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 265797c12223SKeith Busch 265897c12223SKeith Busch return snprintf(buf, size, "%s", dev_name(&pdev->dev)); 265997c12223SKeith Busch } 266097c12223SKeith Busch 26611c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 26621a353d85SMing Lin .name = "pcie", 2663e439bb12SSagi Grimberg .module = THIS_MODULE, 2664e0596ab2SLogan Gunthorpe .flags = NVME_F_METADATA_SUPPORTED | 2665e0596ab2SLogan Gunthorpe NVME_F_PCI_P2PDMA, 26661c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 26675fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 26687fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 26691673f1f0SChristoph Hellwig .free_ctrl = nvme_pci_free_ctrl, 2670f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 267197c12223SKeith Busch .get_address = nvme_pci_get_address, 26721c63dc66SChristoph Hellwig }; 267357dacad5SJay Sternberg 2674b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 2675b00a726aSKeith Busch { 2676b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 2677b00a726aSKeith Busch 2678a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 2679b00a726aSKeith Busch return -ENODEV; 
2680b00a726aSKeith Busch
268197f6ef64SXu Yu if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
2682b00a726aSKeith Busch goto release;
2683b00a726aSKeith Busch
2684b00a726aSKeith Busch return 0;
2685b00a726aSKeith Busch release:
2686a1f447b3SJohannes Thumshirn pci_release_mem_regions(pdev);
2687b00a726aSKeith Busch return -ENODEV;
2688b00a726aSKeith Busch }
2689b00a726aSKeith Busch
26908427bbc2SKai-Heng Feng static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
2691ff5350a8SAndy Lutomirski {
2692ff5350a8SAndy Lutomirski if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
2693ff5350a8SAndy Lutomirski /*
2694ff5350a8SAndy Lutomirski * Several Samsung devices seem to drop off the PCIe bus
2695ff5350a8SAndy Lutomirski * randomly when APST is on and the drive uses the deepest sleep state.
2696ff5350a8SAndy Lutomirski * This has been observed on a Samsung "SM951 NVMe SAMSUNG
2697ff5350a8SAndy Lutomirski * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
2698ff5350a8SAndy Lutomirski * 950 PRO 256GB", but it seems to be restricted to two Dell
2699ff5350a8SAndy Lutomirski * laptops.
2700ff5350a8SAndy Lutomirski */
2701ff5350a8SAndy Lutomirski if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
2702ff5350a8SAndy Lutomirski (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
2703ff5350a8SAndy Lutomirski dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
2704ff5350a8SAndy Lutomirski return NVME_QUIRK_NO_DEEPEST_PS;
27058427bbc2SKai-Heng Feng } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
27068427bbc2SKai-Heng Feng /*
27078427bbc2SKai-Heng Feng * Samsung SSD 960 EVO drops off the PCIe bus after system
2708467c77d4SJarosław Janik * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
2709467c77d4SJarosław Janik * within a few minutes after bootup on a Coffee Lake board -
2710467c77d4SJarosław Janik * ASUS PRIME Z370-A.
27118427bbc2SKai-Heng Feng */
27128427bbc2SKai-Heng Feng if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
2713467c77d4SJarosław Janik (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
2714467c77d4SJarosław Janik dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
27158427bbc2SKai-Heng Feng return NVME_QUIRK_NO_APST;
2716ff5350a8SAndy Lutomirski }
2717ff5350a8SAndy Lutomirski
2718ff5350a8SAndy Lutomirski return 0;
2719ff5350a8SAndy Lutomirski }
2720ff5350a8SAndy Lutomirski
272118119775SKeith Busch static void nvme_async_probe(void *data, async_cookie_t cookie)
272218119775SKeith Busch {
272318119775SKeith Busch struct nvme_dev *dev = data;
272480f513b5SKeith Busch
272518119775SKeith Busch nvme_reset_ctrl_sync(&dev->ctrl);
272618119775SKeith Busch flush_work(&dev->ctrl.scan_work);
272780f513b5SKeith Busch nvme_put_ctrl(&dev->ctrl);
272818119775SKeith Busch }
272918119775SKeith Busch
273057dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
273157dacad5SJay Sternberg {
273257dacad5SJay Sternberg int node, result = -ENOMEM;
273357dacad5SJay Sternberg struct nvme_dev *dev;
2734ff5350a8SAndy Lutomirski unsigned long quirks = id->driver_data;
2735943e942eSJens Axboe size_t alloc_size;
273657dacad5SJay Sternberg
273757dacad5SJay Sternberg node = dev_to_node(&pdev->dev);
273857dacad5SJay Sternberg if (node == NUMA_NO_NODE)
27392fa84351SMasayoshi Mizuma set_dev_node(&pdev->dev, first_memory_node);
274057dacad5SJay Sternberg
274157dacad5SJay Sternberg dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
274257dacad5SJay Sternberg if (!dev)
274357dacad5SJay Sternberg return -ENOMEM;
2744147b27e4SSagi Grimberg
27453b6592f7SJens Axboe
dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue), 27463b6592f7SJens Axboe GFP_KERNEL, node); 274757dacad5SJay Sternberg if (!dev->queues) 274857dacad5SJay Sternberg goto free; 274957dacad5SJay Sternberg 275057dacad5SJay Sternberg dev->dev = get_device(&pdev->dev); 275157dacad5SJay Sternberg pci_set_drvdata(pdev, dev); 275257dacad5SJay Sternberg 2753b00a726aSKeith Busch result = nvme_dev_map(dev); 2754b00a726aSKeith Busch if (result) 2755b00c9b7aSChristophe JAILLET goto put_pci; 2756b00a726aSKeith Busch 2757d86c4d8eSChristoph Hellwig INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 27585c8809e6SChristoph Hellwig INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 275977bf25eaSKeith Busch mutex_init(&dev->shutdown_lock); 2760f3ca80fcSChristoph Hellwig 2761f3ca80fcSChristoph Hellwig result = nvme_setup_prp_pools(dev); 2762f3ca80fcSChristoph Hellwig if (result) 2763b00c9b7aSChristophe JAILLET goto unmap; 2764f3ca80fcSChristoph Hellwig 27658427bbc2SKai-Heng Feng quirks |= check_vendor_combination_bug(pdev); 2766ff5350a8SAndy Lutomirski 2767943e942eSJens Axboe /* 2768943e942eSJens Axboe * Double check that our mempool alloc size will cover the biggest 2769943e942eSJens Axboe * command we support. 2770943e942eSJens Axboe */ 2771943e942eSJens Axboe alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ, 2772943e942eSJens Axboe NVME_MAX_SEGS, true); 2773943e942eSJens Axboe WARN_ON_ONCE(alloc_size > PAGE_SIZE); 2774943e942eSJens Axboe 2775943e942eSJens Axboe dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, 2776943e942eSJens Axboe mempool_kfree, 2777943e942eSJens Axboe (void *) alloc_size, 2778943e942eSJens Axboe GFP_KERNEL, node); 2779943e942eSJens Axboe if (!dev->iod_mempool) { 2780943e942eSJens Axboe result = -ENOMEM; 2781943e942eSJens Axboe goto release_pools; 2782943e942eSJens Axboe } 2783943e942eSJens Axboe 2784b6e44b4cSKeith Busch result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 2785b6e44b4cSKeith Busch quirks); 2786b6e44b4cSKeith Busch if (result) 2787b6e44b4cSKeith Busch goto release_mempool; 2788b6e44b4cSKeith Busch 27891b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 27901b3c47c1SSagi Grimberg 279180f513b5SKeith Busch nvme_get_ctrl(&dev->ctrl); 279218119775SKeith Busch async_schedule(nvme_async_probe, dev); 27934caff8fcSSagi Grimberg 279457dacad5SJay Sternberg return 0; 279557dacad5SJay Sternberg 2796b6e44b4cSKeith Busch release_mempool: 2797b6e44b4cSKeith Busch mempool_destroy(dev->iod_mempool); 279857dacad5SJay Sternberg release_pools: 279957dacad5SJay Sternberg nvme_release_prp_pools(dev); 2800b00c9b7aSChristophe JAILLET unmap: 2801b00c9b7aSChristophe JAILLET nvme_dev_unmap(dev); 280257dacad5SJay Sternberg put_pci: 280357dacad5SJay Sternberg put_device(dev->dev); 280457dacad5SJay Sternberg free: 280557dacad5SJay Sternberg kfree(dev->queues); 280657dacad5SJay Sternberg kfree(dev); 280757dacad5SJay Sternberg return result; 280857dacad5SJay Sternberg } 280957dacad5SJay Sternberg 2810775755edSChristoph Hellwig static void nvme_reset_prepare(struct pci_dev *pdev) 281157dacad5SJay Sternberg { 281257dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 2813a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2814775755edSChristoph Hellwig } 281557dacad5SJay Sternberg 2816775755edSChristoph Hellwig static void nvme_reset_done(struct pci_dev *pdev) 2817775755edSChristoph Hellwig { 2818f263fbb8SLinus Torvalds struct nvme_dev *dev = pci_get_drvdata(pdev); 281979c48ccfSSagi Grimberg 
nvme_reset_ctrl_sync(&dev->ctrl); 282057dacad5SJay Sternberg } 282157dacad5SJay Sternberg 282257dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev) 282357dacad5SJay Sternberg { 282457dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 2825a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 282657dacad5SJay Sternberg } 282757dacad5SJay Sternberg 2828f58944e2SKeith Busch /* 2829f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized 2830f58944e2SKeith Busch * state. This function must not have any dependencies on the device state in 2831f58944e2SKeith Busch * order to proceed. 2832f58944e2SKeith Busch */ 283357dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev) 283457dacad5SJay Sternberg { 283557dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 283657dacad5SJay Sternberg 2837bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 283857dacad5SJay Sternberg pci_set_drvdata(pdev, NULL); 28390ff9d4e1SKeith Busch 28406db28edaSKeith Busch if (!pci_device_is_present(pdev)) { 28410ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 28421d39e692SKeith Busch nvme_dev_disable(dev, true); 2843cb4bfda6SKeith Busch nvme_dev_remove_admin(dev); 28446db28edaSKeith Busch } 28450ff9d4e1SKeith Busch 2846d86c4d8eSChristoph Hellwig flush_work(&dev->ctrl.reset_work); 2847d09f2b45SSagi Grimberg nvme_stop_ctrl(&dev->ctrl); 2848d09f2b45SSagi Grimberg nvme_remove_namespaces(&dev->ctrl); 2849a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 28509fe5c59fSKeith Busch nvme_release_cmb(dev); 285187ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 285257dacad5SJay Sternberg nvme_dev_remove_admin(dev); 285357dacad5SJay Sternberg nvme_free_queues(dev, 0); 2854d09f2b45SSagi Grimberg nvme_uninit_ctrl(&dev->ctrl); 285557dacad5SJay Sternberg nvme_release_prp_pools(dev); 2856b00a726aSKeith Busch nvme_dev_unmap(dev); 28571673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 285857dacad5SJay Sternberg } 285957dacad5SJay Sternberg 286057dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP 286157dacad5SJay Sternberg static int nvme_suspend(struct device *dev) 286257dacad5SJay Sternberg { 286357dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 286457dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 286557dacad5SJay Sternberg 2866a5cdb68cSKeith Busch nvme_dev_disable(ndev, true); 286757dacad5SJay Sternberg return 0; 286857dacad5SJay Sternberg } 286957dacad5SJay Sternberg 287057dacad5SJay Sternberg static int nvme_resume(struct device *dev) 287157dacad5SJay Sternberg { 287257dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 287357dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 287457dacad5SJay Sternberg 2875d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&ndev->ctrl); 287657dacad5SJay Sternberg return 0; 287757dacad5SJay Sternberg } 287857dacad5SJay Sternberg #endif 287957dacad5SJay Sternberg 288057dacad5SJay Sternberg static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); 288157dacad5SJay Sternberg 2882a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 2883a0a3408eSKeith Busch pci_channel_state_t state) 2884a0a3408eSKeith Busch { 2885a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 2886a0a3408eSKeith Busch 2887a0a3408eSKeith Busch /* 2888a0a3408eSKeith Busch * A frozen channel requires a reset. 
When detected, this method will 2889a0a3408eSKeith Busch * shutdown the controller to quiesce. The controller will be restarted 2890a0a3408eSKeith Busch * after the slot reset through driver's slot_reset callback. 2891a0a3408eSKeith Busch */ 2892a0a3408eSKeith Busch switch (state) { 2893a0a3408eSKeith Busch case pci_channel_io_normal: 2894a0a3408eSKeith Busch return PCI_ERS_RESULT_CAN_RECOVER; 2895a0a3408eSKeith Busch case pci_channel_io_frozen: 2896d011fb31SKeith Busch dev_warn(dev->ctrl.device, 2897d011fb31SKeith Busch "frozen state error detected, reset controller\n"); 2898a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2899a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 2900a0a3408eSKeith Busch case pci_channel_io_perm_failure: 2901d011fb31SKeith Busch dev_warn(dev->ctrl.device, 2902d011fb31SKeith Busch "failure state error detected, request disconnect\n"); 2903a0a3408eSKeith Busch return PCI_ERS_RESULT_DISCONNECT; 2904a0a3408eSKeith Busch } 2905a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 2906a0a3408eSKeith Busch } 2907a0a3408eSKeith Busch 2908a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 2909a0a3408eSKeith Busch { 2910a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 2911a0a3408eSKeith Busch 29121b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "restart after slot reset\n"); 2913a0a3408eSKeith Busch pci_restore_state(pdev); 2914d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 2915a0a3408eSKeith Busch return PCI_ERS_RESULT_RECOVERED; 2916a0a3408eSKeith Busch } 2917a0a3408eSKeith Busch 2918a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev) 2919a0a3408eSKeith Busch { 292072cd4cc2SKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 292172cd4cc2SKeith Busch 292272cd4cc2SKeith Busch flush_work(&dev->ctrl.reset_work); 2923a0a3408eSKeith Busch } 2924a0a3408eSKeith Busch 292557dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = { 292657dacad5SJay Sternberg .error_detected = nvme_error_detected, 292757dacad5SJay Sternberg .slot_reset = nvme_slot_reset, 292857dacad5SJay Sternberg .resume = nvme_error_resume, 2929775755edSChristoph Hellwig .reset_prepare = nvme_reset_prepare, 2930775755edSChristoph Hellwig .reset_done = nvme_reset_done, 293157dacad5SJay Sternberg }; 293257dacad5SJay Sternberg 293357dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = { 2934106198edSChristoph Hellwig { PCI_VDEVICE(INTEL, 0x0953), 293508095e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 2936e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 293799466e70SKeith Busch { PCI_VDEVICE(INTEL, 0x0a53), 293899466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 2939e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 294099466e70SKeith Busch { PCI_VDEVICE(INTEL, 0x0a54), 294199466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 2942e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 2943f99cb7afSDavid Wayne Fugate { PCI_VDEVICE(INTEL, 0x0a55), 2944f99cb7afSDavid Wayne Fugate .driver_data = NVME_QUIRK_STRIPE_SIZE | 2945f99cb7afSDavid Wayne Fugate NVME_QUIRK_DEALLOCATE_ZEROES, }, 294650af47d0SAndy Lutomirski { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 29479abd68efSJens Axboe .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 29489abd68efSJens Axboe NVME_QUIRK_MEDIUM_PRIO_SQ }, 2949540c801cSKeith Busch { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2950540c801cSKeith Busch .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 
29510302ae60SMicah Parrish { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
29520302ae60SMicah Parrish .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
295354adc010SGuilherme G. Piccoli { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
295454adc010SGuilherme G. Piccoli .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
29558c97eeccSJeff Lien { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
29568c97eeccSJeff Lien .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2957015282c9SWenbo Wang { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
2958015282c9SWenbo Wang .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2959d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
2960d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2961d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
2962d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2963608cc4b1SChristoph Hellwig { PCI_DEVICE(0x1d1d, 0x1f1f), /* LightNVM qemu device */
2964608cc4b1SChristoph Hellwig .driver_data = NVME_QUIRK_LIGHTNVM, },
2965608cc4b1SChristoph Hellwig { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
2966608cc4b1SChristoph Hellwig .driver_data = NVME_QUIRK_LIGHTNVM, },
2967ea48e877SWei Xu { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
2968ea48e877SWei Xu .driver_data = NVME_QUIRK_LIGHTNVM, },
296957dacad5SJay Sternberg { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2970c74dc780SStephan Günther { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
2971124298bdSDaniel Roschka { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
297257dacad5SJay Sternberg { 0, }
297357dacad5SJay Sternberg };
297457dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table);
297557dacad5SJay Sternberg
297657dacad5SJay Sternberg static struct pci_driver nvme_driver = {
297757dacad5SJay Sternberg .name = "nvme",
297857dacad5SJay Sternberg .id_table = nvme_id_table,
297957dacad5SJay Sternberg .probe = nvme_probe,
298057dacad5SJay Sternberg .remove = nvme_remove,
298157dacad5SJay Sternberg .shutdown = nvme_shutdown,
298257dacad5SJay Sternberg .driver = {
298357dacad5SJay Sternberg .pm = &nvme_dev_pm_ops,
298457dacad5SJay Sternberg },
298574d986abSAlexander Duyck .sriov_configure = pci_sriov_configure_simple,
298657dacad5SJay Sternberg .err_handler = &nvme_err_handler,
298757dacad5SJay Sternberg };
298857dacad5SJay Sternberg
298957dacad5SJay Sternberg static int __init nvme_init(void)
299057dacad5SJay Sternberg {
29919a6327d2SSagi Grimberg return pci_register_driver(&nvme_driver);
299257dacad5SJay Sternberg }
299357dacad5SJay Sternberg
299457dacad5SJay Sternberg static void __exit nvme_exit(void)
299557dacad5SJay Sternberg {
299657dacad5SJay Sternberg pci_unregister_driver(&nvme_driver);
299703e0f3a6SMing Lei flush_workqueue(nvme_wq);
299857dacad5SJay Sternberg _nvme_check_size();
299957dacad5SJay Sternberg }
300057dacad5SJay Sternberg
300157dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
300257dacad5SJay Sternberg MODULE_LICENSE("GPL");
300357dacad5SJay Sternberg MODULE_VERSION("1.0");
300457dacad5SJay Sternberg module_init(nvme_init);
300557dacad5SJay Sternberg module_exit(nvme_exit);
3006
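Editor's note on the quirk table above: each pci_device_id entry carries per-device quirk bits in driver_data, and check_vendor_combination_bug() adds further DMI-derived bits at probe time; the combined mask is handed to nvme_init_ctrl() and consulted throughout the driver. The standalone sketch below is only an illustration of that "flag bit in a quirks word" pattern, not code from this driver or the kernel: the type example_ctrl, the helper needs_ready_delay(), and the bit value chosen for the quirk are assumptions made for the example.

/*
 * Illustrative sketch only -- not part of the driver listed above.
 * Shows how a quirk bit carried in a PCI ID table's driver_data might be
 * tested once it has been merged into a controller's quirks word.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit position is assumed for this example. */
#define EXAMPLE_QUIRK_DELAY_BEFORE_CHK_RDY (1ULL << 3)

struct example_ctrl {
	uint64_t quirks; /* driver_data quirks OR'ed with DMI-derived quirks */
};

/* True when the device was flagged for an extra delay before the
 * readiness check, as the *_DELAY_BEFORE_CHK_RDY table entries request. */
static bool needs_ready_delay(const struct example_ctrl *ctrl)
{
	return ctrl->quirks & EXAMPLE_QUIRK_DELAY_BEFORE_CHK_RDY;
}

int main(void)
{
	struct example_ctrl ctrl = {
		.quirks = EXAMPLE_QUIRK_DELAY_BEFORE_CHK_RDY,
	};

	printf("delay before ready check: %s\n",
	       needs_ready_delay(&ctrl) ? "yes" : "no");
	return 0;
}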